diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml index 99286edc..0c531de9 100644 --- a/.github/workflows/ci-test.yml +++ b/.github/workflows/ci-test.yml @@ -13,19 +13,18 @@ jobs: with: path: src/github.com/qiniu/go-sdk ref: ${{ github.ref }} + submodules: recursive - name: Install Go uses: actions/setup-go@v2 with: go-version: '1.10.x' - - name: Format - run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi - working-directory: src/github.com/qiniu/go-sdk - name: Run unit cases run: | set -e rm -rf $GITHUB_WORKSPACE/src/github.com/qiniu/x && git clone -b v1.12.21 --depth 1 https://github.com/qiniu/x.git $GITHUB_WORKSPACE/src/github.com/qiniu/x GOPATH=$GITHUB_WORKSPACE go get golang.org/x/sync/singleflight GOPATH=$GITHUB_WORKSPACE go get github.com/qiniu/dyn + GOPATH=$GITHUB_WORKSPACE go get github.com/gofrs/flock # FIXME special package # github.com/go-playground/validator/v10 @@ -34,23 +33,28 @@ jobs: # new package name don't work in non-module mode rm -rf $GITHUB_WORKSPACE/src/github.com/go-playground/validator/v10 && git clone -b v10.9.0 --depth 1 https://github.com/go-playground/validator.git $GITHUB_WORKSPACE/src/github.com/go-playground/validator/v10 rm -rf $GITHUB_WORKSPACE/src/github.com/universal-translator && git clone -b v0.18.0 --depth 1 https://github.com/go-playground/universal-translator.git $GITHUB_WORKSPACE/src/github.com/go-playground/universal-translator - + rm -rf $GITHUB_WORKSPACE/src/golang.org/x/crypto && git clone -b v0.10.0 --depth 1 https://go.googlesource.com/crypto $GITHUB_WORKSPACE/src/golang.org/x/crypto # GOPATH=$GITHUB_WORKSPACE go get golang.org/x/crypto/sha3 - + rm -rf $GITHUB_WORKSPACE/src/golang.org/x/text && git clone -b v0.10.0 --depth 1 https://github.com/golang/text $GITHUB_WORKSPACE/src/golang.org/x/text # GOPATH=$GITHUB_WORKSPACE go get golang.org/x/text/language - + GOPATH=$GITHUB_WORKSPACE go get github.com/leodido/go-urn GOPATH=$GITHUB_WORKSPACE go get github.com/go-playground/locales + 
rm -rf $GITHUB_WORKSPACE/src/github.com/dave/jennifer && git clone -b v1.6.1 --depth 1 https://github.com/dave/jennifer $GITHUB_WORKSPACE/src/github.com/dave/jennifer + # GOPATH=$GITHUB_WORKSPACE go get github.com/dave/jennifer + + GOPATH=$GITHUB_WORKSPACE go get github.com/iancoleman/strcase + # github.com/stretchr/testify # newer version require newer go feature rm -rf $GITHUB_WORKSPACE/src/github.com/stretchr/testify && git clone -b v1.6.1 --depth 1 https://github.com/stretchr/testify.git $GITHUB_WORKSPACE/src/github.com/stretchr/testify GOPATH=$GITHUB_WORKSPACE go get github.com/davecgh/go-spew/spew GOPATH=$GITHUB_WORKSPACE go get github.com/pmezard/go-difflib/difflib GOPATH=$GITHUB_WORKSPACE go get gopkg.in/yaml.v3 - + GOPATH=$GITHUB_WORKSPACE make unittest working-directory: src/github.com/qiniu/go-sdk go-mod-test: @@ -59,7 +63,7 @@ jobs: fail-fast: false max-parallel: 1 matrix: - go_version: ['1.11.x', '1.12.x', '1.13.x', '1.14.x', '1.15.x', '1.16.x', '1.17.x', '1.18.x', '1.19.x', '1.20.x'] + go_version: ['1.11.x', '1.12.x', '1.13.x', '1.14.x', '1.15.x', '1.16.x', '1.17.x', '1.18.x', '1.19.x', '1.20.x', '1.21.x'] runs-on: ubuntu-latest permissions: actions: read @@ -70,15 +74,21 @@ jobs: uses: actions/checkout@v2 with: ref: ${{ github.ref }} + submodules: recursive - name: Install Go uses: actions/setup-go@v2 with: go-version: ${{ matrix.go_version }} - name: Format - run: if [ "$(gofmt -s -l . | wc -l)" -gt 0 ]; then exit 1; fi + run: | + if [ "${{ matrix.go_version }}" = "1.21.x" ]; then + if [ "$(gofmt -s -l . 
| wc -l)" -gt 0 ]; then + exit 1 + fi + fi - name: Golint run: | - if [ "${{ matrix.go_version }}" = "1.20.x" ]; then + if [ "${{ matrix.go_version }}" = "1.21.x" ]; then set -e go install honnef.co/go/tools/cmd/staticcheck@latest make staticcheck diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000..7ba8378b --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "api-specs"] + path = api-specs + url = https://github.com/qiniu/api-specs.git diff --git a/Makefile b/Makefile index b5519f4c..82868598 100644 --- a/Makefile +++ b/Makefile @@ -6,3 +6,6 @@ unittest: staticcheck: staticcheck -go 1.10 `go list ./... | egrep -v 'examples|sms'` + +generate: + go generate ./storagev2/ diff --git a/api-specs b/api-specs new file mode 160000 index 00000000..a9575511 --- /dev/null +++ b/api-specs @@ -0,0 +1 @@ +Subproject commit a957551107a2e0f74b9b211765c087c79fb561b4 diff --git a/auth/credentials.go b/auth/credentials.go index 0437ccae..94c3c110 100644 --- a/auth/credentials.go +++ b/auth/credentials.go @@ -1,12 +1,11 @@ package auth import ( - "bytes" + "context" "crypto/hmac" "crypto/sha1" "encoding/base64" "fmt" - "io/ioutil" "net/http" "net/textproto" "sort" @@ -14,6 +13,7 @@ import ( api "github.com/qiniu/go-sdk/v7" "github.com/qiniu/go-sdk/v7/conf" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" ) const ( @@ -94,7 +94,7 @@ func collectData(req *http.Request) (data []byte, err error) { err = rErr return } - req.Body = ioutil.NopCloser(bytes.NewReader(s2)) + req.Body = internal_io.NewBytesNopCloser(s2) data = append(data, s2...) } return @@ -173,7 +173,7 @@ func collectDataV2(req *http.Request) (data []byte, err error) { err = rErr return } - req.Body = ioutil.NopCloser(bytes.NewReader(s2)) + req.Body = internal_io.NewBytesNopCloser(s2) data = append(data, s2...) 
} return @@ -232,3 +232,8 @@ func (ath *Credentials) VerifyCallback(req *http.Request) (bool, error) { return auth == AuthorizationPrefixQBox+token, nil } } + +// Get 实现 CredentialsProvider 接口 +func (c *Credentials) Get(ctx context.Context) (*Credentials, error) { + return c, nil +} diff --git a/client/client.go b/client/client.go index b1a03734..4aacaf49 100644 --- a/client/client.go +++ b/client/client.go @@ -16,12 +16,17 @@ import ( "github.com/qiniu/go-sdk/v7/auth" "github.com/qiniu/go-sdk/v7/conf" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" "github.com/qiniu/go-sdk/v7/internal/log" "github.com/qiniu/go-sdk/v7/reqid" ) var UserAgent = getUserAgentWithAppName("default") -var DefaultClient = Client{&http.Client{Transport: http.DefaultTransport}} +var DefaultClient = Client{ + &http.Client{ + Transport: http.DefaultTransport, + }, +} // 用来打印调试信息 var DebugMode = false @@ -144,7 +149,7 @@ func (r Client) DoRequestWithForm(ctx context.Context, method, reqUrl string, he if headers == nil { headers = http.Header{} } - headers.Add("Content-Type", "application/x-www-form-urlencoded") + headers.Set("Content-Type", conf.CONTENT_TYPE_FORM) requestData := url.Values(data).Encode() if method == "GET" || method == "HEAD" || method == "DELETE" { @@ -170,7 +175,7 @@ func (r Client) DoRequestWithJson(ctx context.Context, method, reqUrl string, he if headers == nil { headers = http.Header{} } - headers.Add("Content-Type", "application/json") + headers.Set("Content-Type", conf.CONTENT_TYPE_JSON) return r.DoRequestWith(ctx, method, reqUrl, headers, bytes.NewReader(reqBody), len(reqBody)) } @@ -194,11 +199,12 @@ func (r Client) Do(ctx context.Context, req *http.Request) (resp *http.Response, // -------------------------------------------------------------------- type ErrorInfo struct { - Err string `json:"error,omitempty"` - Key string `json:"key,omitempty"` - Reqid string `json:"reqid,omitempty"` - Errno int `json:"errno,omitempty"` - Code int `json:"code"` + Err string 
`json:"error,omitempty"` + ErrorCode string `json:"error_code,omitempty"` + Key string `json:"key,omitempty"` + Reqid string `json:"reqid,omitempty"` + Errno int `json:"errno,omitempty"` + Code int `json:"code"` } func (r *ErrorInfo) ErrorDetail() string { @@ -226,20 +232,21 @@ func (r *ErrorInfo) HttpCode() int { func parseError(e *ErrorInfo, r io.Reader) { - body, err1 := ioutil.ReadAll(r) + body, err1 := internal_io.ReadAll(r) if err1 != nil { e.Err = err1.Error() return } var ret struct { - Err string `json:"error"` - Key string `json:"key"` - Errno int `json:"errno"` + Err string `json:"error"` + Key string `json:"key"` + Errno int `json:"errno"` + ErrorCode string `json:"error_code,omitempty"` } if decodeJsonFromData(body, &ret) == nil && ret.Err != "" { // qiniu error msg style returns here - e.Err, e.Key, e.Errno = ret.Err, ret.Key, ret.Errno + e.Err, e.Key, e.Errno, e.ErrorCode = ret.Err, ret.Key, ret.Errno, ret.ErrorCode return } e.Err = string(body) @@ -264,7 +271,7 @@ func ResponseError(resp *http.Response) error { if ok && strings.HasPrefix(ct[0], "application/json") { parseError(e, resp.Body) } else { - bs, err := ioutil.ReadAll(resp.Body) + bs, err := internal_io.ReadAll(resp.Body) if err != nil { e.Err = fmt.Sprintf("failed to read from response body: %s", err) } else { @@ -279,7 +286,7 @@ func ResponseError(resp *http.Response) error { func CallRet(ctx context.Context, ret interface{}, resp *http.Response) (err error) { defer func() { - io.Copy(ioutil.Discard, resp.Body) + _ = internal_io.SinkAll(resp.Body) resp.Body.Close() }() diff --git a/examples/resume_upload_advanced.go b/examples/resume_upload_advanced.go index 7c86839e..a82c6d08 100644 --- a/examples/resume_upload_advanced.go +++ b/examples/resume_upload_advanced.go @@ -11,7 +11,9 @@ import ( "sync" "context" + "github.com/qiniu/go-sdk/v7/auth" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" "github.com/qiniu/go-sdk/v7/storage" ) @@ -76,7 +78,7 @@ func main() { // 尝试从旧的进度文件中读取进度 
recordFp, openErr := os.Open(recordPath) if openErr == nil { - progressBytes, readErr := ioutil.ReadAll(recordFp) + progressBytes, readErr := internal_io.ReadAll(recordFp) if readErr == nil { mErr := json.Unmarshal(progressBytes, &progressRecord) if mErr == nil { diff --git a/go.mod b/go.mod index 6d3c132a..b6d1c3ea 100644 --- a/go.mod +++ b/go.mod @@ -3,15 +3,19 @@ module github.com/qiniu/go-sdk/v7 go 1.14 require ( + github.com/dave/jennifer v1.6.1 github.com/davecgh/go-spew v1.1.1 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect github.com/go-playground/validator/v10 v10.8.0 + github.com/gofrs/flock v0.8.1 + github.com/iancoleman/strcase v0.3.0 github.com/kr/pretty v0.3.0 // indirect github.com/qiniu/dyn v1.3.0 github.com/rogpeppe/go-internal v1.8.0 // indirect github.com/stretchr/testify v1.6.1 golang.org/x/crypto v0.1.0 // indirect golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + golang.org/x/sys v0.12.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ) diff --git a/go.sum b/go.sum index afe543b1..4ce1d40c 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dave/jennifer v1.6.1 h1:T4T/67t6RAA5AIV6+NP8Uk/BIsXgDoqEowgycdQQLuk= +github.com/dave/jennifer v1.6.1/go.mod h1:nXbxhEmQfOZhWml3D1cDK5M1FLnMSozpbFN/m3RmGZc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -12,6 +14,10 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= 
github.com/go-playground/validator/v10 v10.8.0 h1:1kAa0fCrnpv+QYdkdcRzrRM7AyYs5o8+jZdJCz9xj6k= github.com/go-playground/validator/v10 v10.8.0/go.mod h1:9JhgTzTaE31GZDpH/HSvHiRJrJ3iKAgqqH0Bl/Ocjdk= +github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -55,8 +61,9 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= diff --git a/internal/cache/cache.go b/internal/cache/cache.go new file mode 100644 index 00000000..6c277f04 --- /dev/null +++ b/internal/cache/cache.go @@ -0,0 +1,403 @@ +package cache + 
+import ( + "context" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/gofrs/flock" + "golang.org/x/sync/singleflight" +) + +type ( + CacheValue interface { + IsEqual(CacheValue) bool + IsValid() bool + } + + Cache struct { + compactInterval time.Duration + cacheMap map[string]cacheValue + cacheMapMutex sync.Mutex + lastCompactTime time.Time + persistentFile *persistentFile + group singleflight.Group + flushing uint32 + } + + persistentFile struct { + valueType reflect.Type + cacheFilePath string + persistentDuration time.Duration + lastPersistentTime time.Time + handleError func(error) + } + + cacheValue struct { + Value CacheValue `json:"value"` + CreatedAt time.Time `json:"created_at"` + } + + unmarshalledCacheEntry struct { + Key string `json:"key"` + Value json.RawMessage `json:"value"` + CreatedAt time.Time `json:"created_at"` + } +) + +func NewCache(compactInterval time.Duration) *Cache { + return &Cache{ + compactInterval: compactInterval, + cacheMap: make(map[string]cacheValue), + lastCompactTime: time.Now(), + } +} + +func NewPersistentCache( + valueType reflect.Type, + persistentFilePath string, + compactInterval time.Duration, + persistentDuration time.Duration, + handleError func(error), +) (*Cache, error) { + err := os.MkdirAll(filepath.Dir(persistentFilePath), 0700) + if err != nil { + return nil, err + } + unlockFunc, err := lockCachePersistentFile(persistentFilePath, false, handleError) + if err != nil { + return nil, err + } + defer unlockFunc() + + file, closeFunc, err := openCachePersistentFile(persistentFilePath, handleError) + if err != nil { + return nil, err + } + defer closeFunc() + + cacheMap, err := loadCacheMapFrom(valueType, file) + if err != nil { + return nil, err + } + + return &Cache{ + persistentFile: &persistentFile{ + valueType: valueType, + cacheFilePath: persistentFilePath, + persistentDuration: persistentDuration, + lastPersistentTime: time.Now(), 
+ handleError: handleError, + }, + cacheMap: cacheMap, + compactInterval: compactInterval, + lastCompactTime: time.Now(), + }, nil +} + +type GetResult uint8 + +const ( + GetResultFromCache GetResult = 0 + GetResultFromFallback GetResult = 1 + GetResultFromInvalidCache GetResult = 2 + NoResultGot GetResult = 3 +) + +func (cache *Cache) Get(key string, fallback func() (CacheValue, error)) (CacheValue, GetResult) { + cache.cacheMapMutex.Lock() + value, ok := cache.cacheMap[key] + cache.cacheMapMutex.Unlock() + + if ok && value.Value.IsValid() { + return value.Value, GetResultFromCache + } + + newValue, err := cache.doFallback(key, fallback) + if err != nil { + if ok { + return value.Value, GetResultFromInvalidCache + } else { + return nil, NoResultGot + } + } + cache.Set(key, newValue) + return newValue, GetResultFromFallback +} + +func (cache *Cache) doFallback(key string, fallback func() (CacheValue, error)) (CacheValue, error) { + newValue, err, _ := cache.group.Do(key, func() (interface{}, error) { return fallback() }) + if err != nil { + return nil, err + } + return newValue.(CacheValue), nil +} + +func (cache *Cache) Set(key string, value CacheValue) { + if value.IsValid() { + cache.checkType(value) + + now := time.Now() + cache.cacheMapMutex.Lock() + cache.cacheMap[key] = cacheValue{Value: value, CreatedAt: now} + cache.cacheMapMutex.Unlock() + + go cache.flush() + } +} + +func (cache *Cache) Delete(key string) { + cache.cacheMapMutex.Lock() + delete(cache.cacheMap, key) + cache.cacheMapMutex.Unlock() + + go cache.flush() +} + +func (cache *Cache) Clear() { + cache.cacheMapMutex.Lock() + cache.cacheMap = make(map[string]cacheValue) + cache.lastCompactTime = time.Now() + cache.cacheMapMutex.Unlock() + + cache.clearPersistentFile() +} + +func (cache *Cache) clearPersistentFile() { + if pf := cache.persistentFile; pf == nil { + return + } + + var ( + cacheFilePath = cache.persistentFile.cacheFilePath + handleError = cache.persistentFile.handleError + ) + + 
unlockFunc, err := lockCachePersistentFile(cacheFilePath, true, handleError) + if err != nil { + return + } + defer unlockFunc() + + if err := os.Truncate(cacheFilePath, 0); err != nil { + if handleError != nil { + handleError(err) + } + return + } + + cache.persistentFile.lastPersistentTime = time.Now() +} + +func (cache *Cache) checkType(cacheValue CacheValue) { + if pf := cache.persistentFile; pf != nil { + if cacheValueType := reflect.TypeOf(cacheValue); !cacheValueType.AssignableTo(pf.valueType) { + panic(fmt.Sprintf("cannot assign %s to %s", cacheValueType, pf.valueType)) + } + } +} + +func (cache *Cache) flush() { + if !atomic.CompareAndSwapUint32(&cache.flushing, 0, 1) { + return + } + defer atomic.StoreUint32(&cache.flushing, 0) + + if cache.lastCompactTime.Add(cache.compactInterval).Before(time.Now()) { + cache.doCompact() + cache.lastCompactTime = time.Now() + } + + if pf := cache.persistentFile; pf != nil { + if pf.lastPersistentTime.Add(pf.persistentDuration).Before(time.Now()) { + cache.doPersistent() + pf.lastPersistentTime = time.Now() + } + } +} + +func (cache *Cache) doCompact() { + cache.cacheMapMutex.Lock() + defer cache.cacheMapMutex.Unlock() + + var toDeleted []string + for key, value := range cache.cacheMap { + if !value.Value.IsValid() { + toDeleted = append(toDeleted, key) + } + } + for _, toDeletedKey := range toDeleted { + delete(cache.cacheMap, toDeletedKey) + } +} + +func (cache *Cache) doPersistent() { + var ( + cacheFilePath = cache.persistentFile.cacheFilePath + handleError = cache.persistentFile.handleError + valueType = cache.persistentFile.valueType + ) + + unlockFunc, err := lockCachePersistentFile(cacheFilePath, true, handleError) + if err != nil { + return + } + defer unlockFunc() + + cache.cacheMapMutex.Lock() + defer cache.cacheMapMutex.Unlock() + + file, closeFunc, err := openCachePersistentFile(cacheFilePath, handleError) + if err != nil { + return + } + defer closeFunc() + + newCacheMap, err := loadCacheMapFrom(valueType, 
file) + if err != nil { + if handleError != nil { + handleError(err) + } + return + } + if isCacheMapEqual(cache.cacheMap, newCacheMap) { + return + } + mergeCacheMap(cache.cacheMap, newCacheMap) + + if _, err = file.Seek(0, io.SeekStart); err != nil { + if handleError != nil { + handleError(err) + } + return + } + if err = file.Truncate(0); err != nil { + if handleError != nil { + handleError(err) + } + return + } + if err = saveCacheMapTo(file, cache.cacheMap); err != nil && handleError != nil { + handleError(err) + } +} + +func loadCacheMapFrom(valueType reflect.Type, r io.Reader) (map[string]cacheValue, error) { + decoder := json.NewDecoder(r) + cacheMap := make(map[string]cacheValue) + for decoder.More() { + var entry unmarshalledCacheEntry + if err := decoder.Decode(&entry); err != nil { + return nil, err + } + ptrValue := reflect.New(valueType) + if err := json.Unmarshal(entry.Value, ptrValue.Interface()); err != nil { + return nil, err + } + value := ptrValue.Elem().Interface().(CacheValue) + if value.IsValid() { + cacheMap[entry.Key] = cacheValue{Value: value, CreatedAt: entry.CreatedAt} + } + } + return cacheMap, nil +} + +func isCacheMapEqual(left, right map[string]cacheValue) bool { + if len(left) != len(right) { + return false + } + leftKeys := sort.StringSlice(getCacheMapKeys(left)) + leftKeys.Sort() + rightKeys := sort.StringSlice(getCacheMapKeys(right)) + rightKeys.Sort() + if !reflect.DeepEqual(leftKeys, rightKeys) { + return false + } + for _, key := range leftKeys { + leftValue := left[key] + rightValue := right[key] + if !leftValue.Value.IsEqual(rightValue.Value) { + return false + } + } + return true +} + +func getCacheMapKeys(m map[string]cacheValue) []string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + return keys +} + +func mergeCacheMap(left, right map[string]cacheValue) { + for newKey, newValue := range right { + existedCacheValue, exists := left[newKey] + if exists && 
existedCacheValue.CreatedAt.Before(newValue.CreatedAt) || !exists { + left[newKey] = newValue + } + } +} + +func saveCacheMapTo(w io.Writer, m map[string]cacheValue) error { + encoder := json.NewEncoder(w) + for k, v := range m { + rawMessage, err := json.Marshal(v.Value) + if err != nil { + return err + } + if err = encoder.Encode(unmarshalledCacheEntry{ + Key: k, + Value: rawMessage, + CreatedAt: v.CreatedAt, + }); err != nil { + return err + } + } + return nil +} + +func lockCachePersistentFile(lockFilePath string, ex bool, handleError func(error)) (context.CancelFunc, error) { + lockFile := flock.New(lockFilePath) + var err error + if ex { + err = lockFile.Lock() + } else { + err = lockFile.RLock() + } + if err != nil { + if handleError != nil { + handleError(err) + } + return nil, err + } + return func() { + if err := lockFile.Unlock(); err != nil && handleError != nil { + handleError(err) + } + }, nil +} + +func openCachePersistentFile(cacheFile string, handleError func(error)) (*os.File, context.CancelFunc, error) { + file, err := os.OpenFile(cacheFile, os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + if handleError != nil { + handleError(err) + } + return nil, nil, err + } + return file, func() { + if err := file.Close(); err != nil && handleError != nil { + handleError(err) + } + }, nil +} diff --git a/internal/cache/cache_test.go b/internal/cache/cache_test.go new file mode 100644 index 00000000..e1757c8b --- /dev/null +++ b/internal/cache/cache_test.go @@ -0,0 +1,155 @@ +//go:build unit +// +build unit + +package cache + +import ( + "errors" + "io/ioutil" + "os" + "reflect" + "testing" + "time" +) + +type integerCacheValue struct { + Value int `json:"value"` + ExpiredAt time.Time `json:"expired_at"` +} + +func (v integerCacheValue) IsValid() bool { + return time.Now().Before(v.ExpiredAt) +} + +func (left integerCacheValue) IsEqual(rightV CacheValue) bool { + right, ok := rightV.(integerCacheValue) + if !ok { + return false + } + return left.Value == 
right.Value +} + +func TestCache(t *testing.T) { + cache := NewCache(200 * time.Millisecond) + if value, result := cache.Get("key_1", func() (CacheValue, error) { + return integerCacheValue{Value: 1, ExpiredAt: time.Now().Add(100 * time.Millisecond)}, nil + }); result != GetResultFromFallback { + t.Fatalf("unexpected result: %v", result) + } else if v := value.(integerCacheValue).Value; v != 1 { + t.Fatalf("unexpected cache value: %v", v) + } + + if value, result := cache.Get("key_1", func() (CacheValue, error) { + t.Fatal("should not call this fallback") + return nil, nil + }); result != GetResultFromCache { + t.Fatalf("unexpected result: %v", result) + } else if v := value.(integerCacheValue).Value; v != 1 { + t.Fatalf("unexpected cache value: %v", v) + } + + time.Sleep(150 * time.Millisecond) + if value, result := cache.Get("key_1", func() (CacheValue, error) { + return nil, errors.New("test error") + }); result != GetResultFromInvalidCache { + t.Fatalf("unexpected result: %v", result) + } else if v := value.(integerCacheValue).Value; v != 1 { + t.Fatalf("unexpected cache value: %v", v) + } + time.Sleep(150 * time.Millisecond) + cache.flush() + if _, result := cache.Get("key_1", func() (CacheValue, error) { + return nil, errors.New("test error") + }); result != NoResultGot { + t.Fatalf("unexpected result: %v", result) + } + + if value, result := cache.Get("key_2", func() (CacheValue, error) { + return integerCacheValue{Value: 2, ExpiredAt: time.Now().Add(-100 * time.Millisecond)}, nil + }); result != GetResultFromFallback { + t.Fatalf("unexpected result: %v", result) + } else if v := value.(integerCacheValue).Value; v != 2 { + t.Fatalf("unexpected cache value: %v", v) + } + + if value, result := cache.Get("key_3", func() (CacheValue, error) { + return integerCacheValue{Value: 3, ExpiredAt: time.Now().Add(100 * time.Millisecond)}, nil + }); result != GetResultFromFallback { + t.Fatal("unexpected ok") + } else if v := value.(integerCacheValue).Value; v != 3 { + 
t.Fatalf("unexpected cache value: %v", v) + } + cache.Delete("key_3") + if _, result := cache.Get("key_3", func() (CacheValue, error) { + return nil, errors.New("test error") + }); result != NoResultGot { + t.Fatalf("key_3 should be deleted") + } + + if value, result := cache.Get("key_4", func() (CacheValue, error) { + return integerCacheValue{Value: 4, ExpiredAt: time.Now().Add(100 * time.Millisecond)}, nil + }); result != GetResultFromFallback { + t.Fatal("unexpected ok") + } else if v := value.(integerCacheValue).Value; v != 4 { + t.Fatalf("unexpected cache value: %v", v) + } + cache.Clear() + if _, result := cache.Get("key_4", func() (CacheValue, error) { + return nil, errors.New("test error") + }); result != NoResultGot { + t.Fatalf("key_4 should be deleted") + } +} + +func TestCachePersist(t *testing.T) { + cacheFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer cacheFile.Close() + defer os.Remove(cacheFile.Name()) + + valueType := reflect.TypeOf(integerCacheValue{}) + cache, err := NewPersistentCache( + valueType, + cacheFile.Name(), + 100*time.Millisecond, + 100*time.Millisecond, + func(err error) { + t.Fatalf("no error are expected: %s", err) + }) + if err != nil { + t.Fatal(err) + } + if value, result := cache.Get("key_1", func() (CacheValue, error) { + return integerCacheValue{Value: 1, ExpiredAt: time.Now().Add(200 * time.Millisecond)}, nil + }); result != GetResultFromFallback { + t.Fatal("unexpected ok") + } else if v := value.(integerCacheValue).Value; v != 1 { + t.Fatalf("unexpected cache value: %v", v) + } + + time.Sleep(100 * time.Millisecond) + cache.flush() + + cacheMap, err := loadCacheMapFrom(valueType, cacheFile) + if err != nil { + t.Fatal(err) + } + if value, ok := cacheMap["key_1"]; !ok { + t.Fatalf("key_1 should be existed") + } else if v := value.Value.(integerCacheValue).Value; v != 1 { + t.Fatalf("unexpected cache value: %v", v) + } + + time.Sleep(100 * time.Millisecond) + cache.flush() + + cacheMap, err 
= loadCacheMapFrom(valueType, cacheFile) + if err != nil { + t.Fatal(err) + } + if len(cacheMap) != 0 { + t.Fatalf("key_1 should be deleted") + } +} diff --git a/internal/clientv2/client.go b/internal/clientv2/client.go index 0cb9ef40..1fd35c0e 100644 --- a/internal/clientv2/client.go +++ b/internal/clientv2/client.go @@ -1,12 +1,11 @@ package clientv2 import ( - "io" - "io/ioutil" "net/http" "sort" clientV1 "github.com/qiniu/go-sdk/v7/client" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" ) type Client interface { @@ -17,7 +16,7 @@ type Handler func(req *http.Request) (*http.Response, error) type client struct { coreClient Client - interceptors []Interceptor + interceptors interceptorList } func NewClient(cli Client, interceptors ...Interceptor) Client { @@ -36,11 +35,6 @@ func NewClient(cli Client, interceptors ...Interceptor) Client { is = append(is, newDebugInterceptor()) sort.Sort(is) - // 反转 - for i, j := 0, len(is)-1; i < j; i, j = i+1, j-1 { - is[i], is[j] = is[j], is[i] - } - return &client{ coreClient: cli, interceptors: is, @@ -51,9 +45,17 @@ func (c *client) Do(req *http.Request) (*http.Response, error) { handler := func(req *http.Request) (*http.Response, error) { return c.coreClient.Do(req) } + var newInterceptorList interceptorList + if intercetorsFromRequest := getIntercetorsFromRequest(req); len(intercetorsFromRequest) == 0 { + newInterceptorList = c.interceptors + } else if len(c.interceptors) == 0 { + newInterceptorList = intercetorsFromRequest + } else { + newInterceptorList = append(c.interceptors, intercetorsFromRequest...) 
+ sort.Sort(newInterceptorList) + } - interceptors := c.interceptors - for _, interceptor := range interceptors { + for _, interceptor := range newInterceptorList { h := handler i := interceptor handler = func(r *http.Request) (*http.Response, error) { @@ -96,7 +98,7 @@ func DoAndDecodeJsonResponse(c Client, options RequestParams, ret interface{}) e resp, err := Do(c, options) defer func() { if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) + _ = internal_io.SinkAll(resp.Body) resp.Body.Close() } }() diff --git a/internal/clientv2/client_test.go b/internal/clientv2/client_test.go index 42cff36a..96fc10ec 100644 --- a/internal/clientv2/client_test.go +++ b/internal/clientv2/client_test.go @@ -87,6 +87,6 @@ func TestInterceptor(t *testing.T) { v = resp.Header.Get(headerKey) if v != " -> request-01 -> request-02 -> request-03 -> Do -> response-03 -> response-02 -> response-01" { - t.Fatal() + t.Fatalf("Unexpected header value: %s", v) } } diff --git a/internal/clientv2/context.go b/internal/clientv2/context.go new file mode 100644 index 00000000..d4ca1c84 --- /dev/null +++ b/internal/clientv2/context.go @@ -0,0 +1,31 @@ +package clientv2 + +import ( + "context" + "net/http" + "sort" +) + +type intercetorsContextKey struct{} + +func WithInterceptors(req *http.Request, interceptors ...Interceptor) *http.Request { + newInterceptors, ok := req.Context().Value(intercetorsContextKey{}).(interceptorList) + if !ok { + newInterceptors = interceptorList(interceptors) + } else { + newInterceptors = append(newInterceptors, interceptors...) 
+ } + return req.WithContext(context.WithValue(req.Context(), intercetorsContextKey{}, newInterceptors)) +} + +func getIntercetorsFromRequest(req *http.Request) interceptorList { + if req == nil { + return interceptorList{} + } + interceptors, ok := req.Context().Value(intercetorsContextKey{}).(interceptorList) + if !ok { + return interceptorList{} + } + sort.Sort(interceptors) + return interceptors +} diff --git a/internal/clientv2/interceptor.go b/internal/clientv2/interceptor.go index 0e54a71a..b9c882ea 100644 --- a/internal/clientv2/interceptor.go +++ b/internal/clientv2/interceptor.go @@ -27,7 +27,7 @@ type Interceptor interface { type interceptorList []Interceptor func (l interceptorList) Less(i, j int) bool { - return l[i].Priority() < l[j].Priority() + return l[i].Priority() >= l[j].Priority() } func (l interceptorList) Swap(i, j int) { diff --git a/internal/clientv2/interceptor_auth.go b/internal/clientv2/interceptor_auth.go index c7319830..4fa6f976 100644 --- a/internal/clientv2/interceptor_auth.go +++ b/internal/clientv2/interceptor_auth.go @@ -1,13 +1,14 @@ package clientv2 import ( - "github.com/qiniu/go-sdk/v7/auth" "net/http" + + "github.com/qiniu/go-sdk/v7/auth" ) type AuthConfig struct { - Credentials auth.Credentials // - TokenType auth.TokenType // 不包含上传 + Credentials *auth.Credentials // + TokenType auth.TokenType // 不包含上传 } type authInterceptor struct { @@ -29,9 +30,11 @@ func (interceptor *authInterceptor) Intercept(req *http.Request, handler Handler return handler(req) } - err := interceptor.config.Credentials.AddToken(interceptor.config.TokenType, req) - if err != nil { - return nil, err + if credentials := interceptor.config.Credentials; credentials != nil { + err := credentials.AddToken(interceptor.config.TokenType, req) + if err != nil { + return nil, err + } } return handler(req) diff --git a/internal/clientv2/interceptor_retry_hosts.go b/internal/clientv2/interceptor_retry_hosts.go index fc28a8d9..67d3e538 100644 --- 
a/internal/clientv2/interceptor_retry_hosts.go +++ b/internal/clientv2/interceptor_retry_hosts.go @@ -1,14 +1,13 @@ package clientv2 import ( - "io" - "io/ioutil" "net/http" "net/url" "strings" "time" "github.com/qiniu/go-sdk/v7/internal/hostprovider" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" ) type HostsRetryConfig struct { @@ -69,7 +68,7 @@ func (interceptor *hostsRetryInterceptor) Intercept(req *http.Request, handler H for i := 0; ; i++ { // Clone 防止后面 Handler 处理对 req 有污染 - reqBefore := cloneReq(req.Context(), req) + reqBefore := cloneReq(req) resp, err = handler(req) if !interceptor.options.RetryConfig.ShouldRetry(reqBefore, resp, err) { @@ -93,7 +92,12 @@ func (interceptor *hostsRetryInterceptor) Intercept(req *http.Request, handler H if pErr != nil { break } - + if index := strings.Index(newHost, "://"); index >= 0 { + newHost = newHost[(index + len("://")):] + } + if index := strings.Index(newHost, "/"); index >= 0 { + newHost = newHost[:index] + } if len(newHost) == 0 { break } @@ -113,7 +117,7 @@ func (interceptor *hostsRetryInterceptor) Intercept(req *http.Request, handler H req = reqBefore if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) + _ = internal_io.SinkAll(resp.Body) resp.Body.Close() } diff --git a/internal/clientv2/interceptor_retry_hosts_test.go b/internal/clientv2/interceptor_retry_hosts_test.go index f4098008..c58c5fce 100644 --- a/internal/clientv2/interceptor_retry_hosts_test.go +++ b/internal/clientv2/interceptor_retry_hosts_test.go @@ -4,11 +4,12 @@ package clientv2 import ( - clientV1 "github.com/qiniu/go-sdk/v7/client" - "github.com/qiniu/go-sdk/v7/internal/hostprovider" "net/http" "testing" "time" + + clientV1 "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/internal/hostprovider" ) func TestHostsAlwaysRetryInterceptor(t *testing.T) { @@ -67,11 +68,11 @@ func TestHostsAlwaysRetryInterceptor(t *testing.T) { start := time.Now() resp, _ := Do(c, RequestParams{ - Context: nil, - 
Method: RequestMethodGet, - Url: "https://" + hostA + "/path/123", - Header: nil, - BodyCreator: nil, + Context: nil, + Method: RequestMethodGet, + Url: "https://" + hostA + "/path/123", + Header: nil, + GetBody: nil, }) duration := float32(time.Now().UnixNano()-start.UnixNano()) / 1e9 @@ -149,11 +150,11 @@ func TestHostsNotRetryInterceptor(t *testing.T) { start := time.Now() resp, _ := Do(c, RequestParams{ - Context: nil, - Method: RequestMethodGet, - Url: "https://" + hostA + "/path/123", - Header: nil, - BodyCreator: nil, + Context: nil, + Method: RequestMethodGet, + Url: "https://" + hostA + "/path/123", + Header: nil, + GetBody: nil, }) duration := float32(time.Now().UnixNano()-start.UnixNano()) / 1e9 @@ -225,11 +226,11 @@ func TestHostsRetryInterceptorByRequest(t *testing.T) { c := NewClient(nil, interceptor, hRetryInterceptor, sRetryInterceptor) resp, err := Do(c, RequestParams{ - Context: nil, - Method: RequestMethodGet, - Url: "https://" + hostA, - Header: nil, - BodyCreator: nil, + Context: nil, + Method: RequestMethodGet, + Url: "https://" + hostA, + Header: nil, + GetBody: nil, }) if err != nil { diff --git a/internal/clientv2/interceptor_retry_simple.go b/internal/clientv2/interceptor_retry_simple.go index e19b839d..bf01f4ba 100644 --- a/internal/clientv2/interceptor_retry_simple.go +++ b/internal/clientv2/interceptor_retry_simple.go @@ -2,7 +2,6 @@ package clientv2 import ( "io" - "io/ioutil" "math/rand" "net" "net/http" @@ -13,8 +12,11 @@ import ( "time" clientv1 "github.com/qiniu/go-sdk/v7/client" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" ) +type contextKeyBufferResponse struct{} + type RetryConfig struct { RetryMax int // 最大重试次数 RetryInterval func() time.Duration // 重试时间间隔 @@ -37,9 +39,7 @@ func (c *RetryConfig) init() { } if c.ShouldRetry == nil { - c.ShouldRetry = func(req *http.Request, resp *http.Response, err error) bool { - return isSimpleRetryable(req, resp, err) - } + c.ShouldRetry = isSimpleRetryable } } @@ -61,6 +61,7 @@ func 
(interceptor *simpleRetryInterceptor) Intercept(req *http.Request, handler if interceptor == nil || req == nil { return handler(req) } + toBufferResponse := req.Context().Value(contextKeyBufferResponse{}) != nil interceptor.config.init() @@ -72,9 +73,15 @@ func (interceptor *simpleRetryInterceptor) Intercept(req *http.Request, handler // 可能会被重试多次 for i := 0; ; i++ { // Clone 防止后面 Handler 处理对 req 有污染 - reqBefore := cloneReq(req.Context(), req) + reqBefore := cloneReq(req) resp, err = handler(req) + if err == nil { + if toBufferResponse { + err = bufferResponse(resp) + } + } + if !interceptor.config.ShouldRetry(reqBefore, resp, err) { return resp, err } @@ -85,7 +92,7 @@ func (interceptor *simpleRetryInterceptor) Intercept(req *http.Request, handler } if resp != nil && resp.Body != nil { - io.Copy(ioutil.Discard, resp.Body) + _ = internal_io.SinkAll(resp.Body) resp.Body.Close() } @@ -98,6 +105,15 @@ func (interceptor *simpleRetryInterceptor) Intercept(req *http.Request, handler return resp, err } +func bufferResponse(resp *http.Response) error { + buffer, err := internal_io.ReadAll(resp.Body) + if err != nil { + return err + } + resp.Body = internal_io.NewBytesNopCloser(buffer) + return nil +} + func isSimpleRetryable(req *http.Request, resp *http.Response, err error) bool { return isRequestRetryable(req) && (isResponseRetryable(resp) || IsErrorRetryable(err)) } diff --git a/internal/clientv2/interceptor_retry_simple_test.go b/internal/clientv2/interceptor_retry_simple_test.go index 7a104e1c..ff70a09f 100644 --- a/internal/clientv2/interceptor_retry_simple_test.go +++ b/internal/clientv2/interceptor_retry_simple_test.go @@ -42,11 +42,11 @@ func TestSimpleAlwaysRetryInterceptor(t *testing.T) { start := time.Now() resp, _ := Do(c, RequestParams{ - Context: nil, - Method: "", - Url: "https://aaa.com", - Header: nil, - BodyCreator: nil, + Context: nil, + Method: "", + Url: "https://aaa.com", + Header: nil, + GetBody: nil, }) duration := 
float32(time.Now().UnixNano()-start.UnixNano()) / 1e9 @@ -98,11 +98,11 @@ func TestSimpleNotRetryInterceptor(t *testing.T) { start := time.Now() resp, _ := Do(c, RequestParams{ - Context: nil, - Method: "", - Url: "https://aaa.com", - Header: nil, - BodyCreator: nil, + Context: nil, + Method: "", + Url: "https://aaa.com", + Header: nil, + GetBody: nil, }) duration := float32(time.Now().UnixNano()-start.UnixNano()) / 1e9 diff --git a/internal/clientv2/multipart.go b/internal/clientv2/multipart.go new file mode 100644 index 00000000..8ecc2b3b --- /dev/null +++ b/internal/clientv2/multipart.go @@ -0,0 +1,135 @@ +package clientv2 + +import ( + "io" + "mime/multipart" + + "github.com/qiniu/go-sdk/v7/internal/context" + compatible_io "github.com/qiniu/go-sdk/v7/internal/io" +) + +type ( + keyValuePair struct { + key, value string + } + keyFilePair struct { + key, fileName string + stream compatible_io.ReadSeekCloser + } + + MultipartForm struct { + values []keyValuePair + files []keyFilePair + ctx context.Context + cancel context.CancelCauseFunc + w *io.PipeWriter + } + + multipartFormReader struct { + multipartWriter *multipart.Writer + form *MultipartForm + r *io.PipeReader + } +) + +func (f *MultipartForm) SetValue(key, value string) *MultipartForm { + f.values = append(f.values, keyValuePair{key: key, value: value}) + return f +} + +func (f *MultipartForm) SetFile(key, fileName string, stream compatible_io.ReadSeekCloser) *MultipartForm { + f.files = append(f.files, keyFilePair{key: key, fileName: fileName, stream: stream}) + return f +} + +func newMultipartFormReader(form *MultipartForm) *multipartFormReader { + reader := &multipartFormReader{form: form} + reader.r, form.w = io.Pipe() + reader.multipartWriter = multipart.NewWriter(form.w) + + go func(multipartWriter *multipart.Writer, w *io.PipeWriter, ctx context.Context, cancel context.CancelCauseFunc) { + defer w.Close() + defer multipartWriter.Close() + + for _, pair := range form.values { + select { + case 
<-ctx.Done(): + return + default: + if err := multipartWriter.WriteField(pair.key, pair.value); err != nil { + cancel(err) + return + } + } + } + for _, pair := range form.files { + select { + case <-ctx.Done(): + return + default: + if err := reader.createFormFile(pair.key, pair.fileName, pair.stream); err != nil { + cancel(err) + return + } + } + } + }(reader.multipartWriter, form.w, form.ctx, form.cancel) + + return reader +} + +func (r *multipartFormReader) Read(p []byte) (int, error) { + select { + case <-r.form.ctx.Done(): + return 0, context.Cause(r.form.ctx) + default: + return r.r.Read(p) + } +} + +func (r *multipartFormReader) Close() (err error) { + r.form.cancel(io.ErrClosedPipe) + err = r.r.Close() + for _, pair := range r.form.files { + if e := pair.stream.Close(); e != nil && err == nil { + err = e + } + } + return err +} + +func (r *multipartFormReader) formDataContentType() string { + return r.multipartWriter.FormDataContentType() +} + +func (r *multipartFormReader) createFormFile(fieldName, fileName string, stream compatible_io.ReadSeekCloser) error { + if w, err := r.multipartWriter.CreateFormFile(fieldName, fileName); err != nil { + return err + } else if _, err := io.Copy(w, stream); err != nil { + return err + } + return nil +} + +func GetMultipartFormRequestBody(info *MultipartForm) GetRequestBody { + return func(o *RequestParams) (io.ReadCloser, error) { + if cancel := info.cancel; cancel != nil { + cancel(io.ErrClosedPipe) + } + info.ctx, info.cancel = context.WithCancelCause(context.Background()) + + if w := info.w; w != nil { + w.Close() + info.w = nil + } + + for _, pair := range info.files { + if _, err := pair.stream.Seek(0, io.SeekStart); err != nil { + return nil, err + } + } + r := newMultipartFormReader(info) + o.Header.Add("Content-Type", r.formDataContentType()) + return r, nil + } +} diff --git a/internal/clientv2/multipart_test.go b/internal/clientv2/multipart_test.go new file mode 100644 index 00000000..5296a23f --- /dev/null 
+++ b/internal/clientv2/multipart_test.go @@ -0,0 +1,132 @@ +//go:build unit +// +build unit + +package clientv2 + +import ( + "bytes" + "crypto/md5" + "io" + "io/ioutil" + "math/rand" + "mime" + "mime/multipart" + "net/http" + "os" + "testing" + "time" +) + +func TestMultipart(t *testing.T) { + file1 := randFile(t, 1024*1024*10) + defer file1.Close() + file2 := randFile(t, 1024*1024*10) + defer file2.Close() + + form := new(MultipartForm). + SetValue("test-1", "value-1"). + SetValue("test-2", "value-2"). + SetFile("test-file-1", "test-file-name-1", file1). + SetFile("test-file-2", "test-file-name-2", file2) + getRequestBody := GetMultipartFormRequestBody(form) + + for i := 0; i < 5; i++ { + header := make(http.Header) + requestBody, err := getRequestBody(&RequestParams{Header: header}) + if err != nil { + t.Fatal(err) + } + reader, err := parseMultipart(requestBody, header) + if err != nil { + t.Fatal(err) + } + if part, err := reader.NextPart(); err != nil { + t.Fatal(err) + } else { + assertPartValue(t, part, "test-1", "value-1") + } + if part, err := reader.NextPart(); err != nil { + t.Fatal(err) + } else { + assertPartValue(t, part, "test-2", "value-2") + } + if part, err := reader.NextPart(); err != nil { + t.Fatal(err) + } else if f, err := os.Open(file1.Name()); err != nil { + t.Fatal(err) + } else { + assertPartFile(t, part, "test-file-1", "test-file-name-1", f) + f.Close() + } + if part, err := reader.NextPart(); err != nil { + t.Fatal(err) + } else if f, err := os.Open(file2.Name()); err != nil { + t.Fatal(err) + } else { + assertPartFile(t, part, "test-file-2", "test-file-name-2", f) + f.Close() + } + if _, err = reader.NextPart(); err != io.EOF { + t.Fatalf("unexpected error: %v", err) + } + } +} + +func randFile(t *testing.T, n int64) *os.File { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + _, err = io.CopyN(file, rand.New(rand.NewSource(time.Now().UnixNano())), 1024*1024*10) + if err != nil { + t.Fatal(err) + } + _, 
err = file.Seek(0, io.SeekStart) + if err != nil { + t.Fatal(err) + } + return file +} + +func parseMultipart(r io.Reader, header http.Header) (*multipart.Reader, error) { + _, params, err := mime.ParseMediaType(header.Get("Content-Type")) + if err != nil { + return nil, err + } + return multipart.NewReader(r, params["boundary"]), nil +} + +func assertPartValue(t *testing.T, part *multipart.Part, key, value string) { + if part.FormName() != key { + t.Fatalf("unexpected form name: %s != %s", part.FormName(), key) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, part); err != nil { + t.Fatal(err) + } + if buf.String() != value { + t.Fatalf("unexpected form value: %s != %s", buf.String(), value) + } +} + +func assertPartFile(t *testing.T, part *multipart.Part, key, fileName string, value io.Reader) { + if part.FormName() != key { + t.Fatalf("unexpected form name: %s != %s", part.FormName(), key) + } + if part.FileName() != fileName { + t.Fatalf("unexpected file name: %s != %s", part.FileName(), fileName) + } + md5Hasher := md5.New() + if _, err := io.Copy(md5Hasher, part); err != nil { + t.Fatal(err) + } + actualMd5 := md5Hasher.Sum(nil) + md5Hasher.Reset() + if _, err := io.Copy(md5Hasher, value); err != nil { + t.Fatal(err) + } + expectedMd5 := md5Hasher.Sum(nil) + if !bytes.Equal(actualMd5, expectedMd5) { + t.Fatalf("unexpected form file value") + } +} diff --git a/internal/clientv2/request.go b/internal/clientv2/request.go index 0cada2b5..c433dc70 100644 --- a/internal/clientv2/request.go +++ b/internal/clientv2/request.go @@ -7,39 +7,42 @@ import ( "io" "net/http" "net/url" + "strings" + + "github.com/qiniu/go-sdk/v7/conf" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" ) const ( - RequestMethodGet = "GET" - RequestMethodPut = "PUT" - RequestMethodPost = "POST" - RequestMethodHead = "HEAD" - RequestMethodDelete = "DELETE" + RequestMethodGet = http.MethodGet + RequestMethodPut = http.MethodPut + RequestMethodPost = http.MethodPost + 
RequestMethodHead = http.MethodHead + RequestMethodDelete = http.MethodDelete ) -type RequestBodyCreator func(options *RequestParams) (io.Reader, error) +type GetRequestBody func(options *RequestParams) (io.ReadCloser, error) -func RequestBodyCreatorOfJson(object interface{}) RequestBodyCreator { - body := object - return func(o *RequestParams) (io.Reader, error) { - reqBody, err := json.Marshal(body) - if err != nil { - return nil, err - } - o.Header.Add("Content-Type", "application/json") - return bytes.NewReader(reqBody), nil +func GetJsonRequestBody(object interface{}) (GetRequestBody, error) { + reqBody, err := json.Marshal(object) + if err != nil { + return nil, err } + return func(o *RequestParams) (io.ReadCloser, error) { + o.Header.Set("Content-Type", conf.CONTENT_TYPE_JSON) + return internal_io.NewReadSeekableNopCloser(bytes.NewReader(reqBody)), nil + }, nil } -func RequestBodyCreatorForm(info map[string][]string) RequestBodyCreator { - body := FormStringInfo(info) - return func(o *RequestParams) (io.Reader, error) { - o.Header.Add("Content-Type", "application/x-www-form-urlencoded") - return bytes.NewBufferString(body), nil +func GetFormRequestBody(info map[string][]string) GetRequestBody { + body := formStringInfo(info) + return func(o *RequestParams) (io.ReadCloser, error) { + o.Header.Set("Content-Type", conf.CONTENT_TYPE_FORM) + return internal_io.NewReadSeekableNopCloser(strings.NewReader(body)), nil } } -func FormStringInfo(info map[string][]string) string { +func formStringInfo(info map[string][]string) string { if len(info) == 0 { return "" } @@ -47,11 +50,12 @@ func FormStringInfo(info map[string][]string) string { } type RequestParams struct { - Context context.Context - Method string - Url string - Header http.Header - BodyCreator RequestBodyCreator + Context context.Context + Method string + Url string + Header http.Header + GetBody GetRequestBody + BufferResponse bool } func (o *RequestParams) init() { @@ -67,26 +71,35 @@ func (o 
*RequestParams) init() { o.Header = http.Header{} } - if o.BodyCreator == nil { - o.BodyCreator = func(options *RequestParams) (io.Reader, error) { + if o.GetBody == nil { + o.GetBody = func(options *RequestParams) (io.ReadCloser, error) { return nil, nil } } } -func NewRequest(options RequestParams) (*http.Request, error) { +func NewRequest(options RequestParams) (req *http.Request, err error) { options.init() - body, cErr := options.BodyCreator(&options) - if cErr != nil { - return nil, cErr - } - - req, err := http.NewRequest(options.Method, options.Url, body) + body, err := options.GetBody(&options) if err != nil { return nil, err } - req = req.WithContext(options.Context) + req, err = http.NewRequest(options.Method, options.Url, body) + if err != nil { + return + } + if options.Context != nil { + req = req.WithContext(options.Context) + } + if options.BufferResponse { + req = req.WithContext(context.WithValue(options.Context, contextKeyBufferResponse{}, struct{}{})) + } req.Header = options.Header - return req, nil + if options.GetBody != nil && body != nil && body != http.NoBody { + req.GetBody = func() (io.ReadCloser, error) { + return options.GetBody(&options) + } + } + return } diff --git a/internal/clientv2/request_compatible.go b/internal/clientv2/request_compatible.go index de10ae28..2343eb21 100644 --- a/internal/clientv2/request_compatible.go +++ b/internal/clientv2/request_compatible.go @@ -1,7 +1,6 @@ package clientv2 import ( - "context" "mime/multipart" "net/http" "net/textproto" @@ -10,12 +9,8 @@ import ( // 此处是为了版本兼容,sdk 支持最低版本为 go1.10, go1.13 提供 req.Clone 方法, // 此处 copy 高版本的 go 标准库方法 -func cloneReq(ctx context.Context, r *http.Request) *http.Request { - if ctx == nil { - panic("nil context") - } - - r2 := r.WithContext(ctx) +func cloneReq(r *http.Request) *http.Request { + r2 := r.WithContext(r.Context()) if r.Header != nil { r2.Header = cloneHeader(r.Header) } diff --git a/internal/clientv2/request_test.go b/internal/clientv2/request_test.go 
new file mode 100644 index 00000000..b1e4d1cd --- /dev/null +++ b/internal/clientv2/request_test.go @@ -0,0 +1,72 @@ +//go:build unit +// +build unit + +package clientv2 + +import ( + "net/http" + "testing" +) + +func TestGetJsonRequestBody(t *testing.T) { + runTestCase := func(t *testing.T, getBody GetRequestBody) { + params := RequestParams{Header: make(http.Header)} + readCloser, err := getBody(¶ms) + if err != nil { + t.Fatal(err) + } + defer readCloser.Close() + + buf := make([]byte, 1024) + n, err := readCloser.Read(buf) + if err != nil { + t.Fatal(err) + } + if string(buf[:n]) != `{"v":"value","v2":10}` { + t.Fatal("invalid body") + } else if params.Header.Get("Content-Type") != "application/json" { + t.Fatal("invalid header") + } else if err = readCloser.Close(); err != nil { + t.Fatal(err) + } + } + type S struct { + V string `json:"v"` + V2 int `json:"v2"` + } + + getBody, err := GetJsonRequestBody(S{V: "value", V2: 10}) + if err != nil { + t.Fatal(err) + } + runTestCase(t, getBody) + runTestCase(t, getBody) +} + +func TestGetFormRequestBody(t *testing.T) { + runTestCase := func(t *testing.T, getBody GetRequestBody) { + params := RequestParams{Header: make(http.Header)} + readCloser, err := getBody(¶ms) + if err != nil { + t.Fatal(err) + } + defer readCloser.Close() + + buf := make([]byte, 1024) + n, err := readCloser.Read(buf) + if err != nil { + t.Fatal(err) + } + if string(buf[:n]) != `v=value&v2=1&v2=2&v2=3` { + t.Fatal("invalid body") + } else if params.Header.Get("Content-Type") != "application/x-www-form-urlencoded" { + t.Fatal("invalid header") + } else if err = readCloser.Close(); err != nil { + t.Fatal(err) + } + } + + getBody := GetFormRequestBody(map[string][]string{"v": {"value"}, "v2": {"1", "2", "3"}}) + runTestCase(t, getBody) + runTestCase(t, getBody) +} diff --git a/internal/context/context_test.go b/internal/context/context_test.go new file mode 100644 index 00000000..87b475dd --- /dev/null +++ b/internal/context/context_test.go @@ -0,0 
+1,42 @@ +//go:build unit +// +build unit + +package context_test + +import ( + offical_context "context" + "io" + "testing" + + "github.com/qiniu/go-sdk/v7/internal/context" +) + +func TestCause(t *testing.T) { + ctx, cancel := context.WithCancelCause(context.Background()) + + select { + case <-ctx.Done(): + t.Fatalf("Expect ctx.Done() is not done") + default: + } + + if err := ctx.Err(); err != nil { + t.Fatalf("Expect ctx.Err() to return nil, but %s", err) + } + + cancel(io.EOF) + + select { + case <-ctx.Done(): + default: + t.Fatalf("Expect ctx.Done() is done") + } + + if err := ctx.Err(); err != offical_context.Canceled { + t.Fatalf("Expect ctx.Err() to return Canceled, but %s", err) + } + + if c := context.Cause(ctx); c != io.EOF { + t.Fatalf("Expect context.Cause(ctx) to return io.EOF, but %T", c) + } +} diff --git a/internal/context/go1.19.go b/internal/context/go1.19.go new file mode 100644 index 00000000..e59387d1 --- /dev/null +++ b/internal/context/go1.19.go @@ -0,0 +1,47 @@ +//go:build !go1.20 +// +build !go1.20 + +package context + +import ( + "context" + "sync" +) + +type ( + Context = context.Context + CancelCauseFunc func(cause error) + cancelCauseErrorKey struct{} + cancelCauseErrorValue struct { + mutex sync.Mutex + err error + } +) + +func Cause(c Context) error { + if v := c.Value(cancelCauseErrorKey{}); v != nil { + if val, ok := v.(*cancelCauseErrorValue); ok { + val.mutex.Lock() + defer val.mutex.Unlock() + return val.err + } + } + return c.Err() +} + +func WithCancelCause(parent Context) (ctx Context, cancel CancelCauseFunc) { + errWrapper := new(cancelCauseErrorValue) + newCtx, cancelFunc := context.WithCancel(context.WithValue(parent, cancelCauseErrorKey{}, errWrapper)) + return newCtx, func(cause error) { + errWrapper.mutex.Lock() + defer errWrapper.mutex.Unlock() + if errWrapper.err == nil { + errWrapper.err = cause + } + cancelFunc() + } +} + +func Background() Context { + return context.Background() +} diff --git 
a/internal/context/go1.20.go b/internal/context/go1.20.go new file mode 100644 index 00000000..d7d282b4 --- /dev/null +++ b/internal/context/go1.20.go @@ -0,0 +1,25 @@ +//go:build go1.20 +// +build go1.20 + +package context + +import ( + "context" +) + +type ( + Context = context.Context + CancelCauseFunc = context.CancelCauseFunc +) + +func Cause(c Context) error { + return context.Cause(c) +} + +func WithCancelCause(parent Context) (ctx Context, cancel CancelCauseFunc) { + return context.WithCancelCause(parent) +} + +func Background() Context { + return context.Background() +} diff --git a/internal/hostprovider/host_provider.go b/internal/hostprovider/host_provider.go index d9233566..338778bc 100644 --- a/internal/hostprovider/host_provider.go +++ b/internal/hostprovider/host_provider.go @@ -2,14 +2,28 @@ package hostprovider import ( "errors" - "github.com/qiniu/go-sdk/v7/internal/freezer" "time" + + "github.com/qiniu/go-sdk/v7/internal/freezer" ) -type HostProvider interface { - Provider() (string, error) - Freeze(host string, cause error, duration time.Duration) error -} +var ( + ErrNoHostFound = errors.New("no host found") + ErrAllHostsFrozen = errors.New("all hosts are frozen") +) + +type ( + HostProvider interface { + Provider() (string, error) + Freeze(host string, cause error, duration time.Duration) error + } + + arrayHostProvider struct { + hosts []string + freezer freezer.Freezer + lastFreezeErr error + } +) func NewWithHosts(hosts []string) HostProvider { return &arrayHostProvider{ @@ -18,15 +32,9 @@ func NewWithHosts(hosts []string) HostProvider { } } -type arrayHostProvider struct { - hosts []string - freezer freezer.Freezer - lastFreezeErr error -} - func (a *arrayHostProvider) Provider() (string, error) { if len(a.hosts) == 0 { - return "", errors.New("no host found") + return "", ErrNoHostFound } for _, host := range a.hosts { @@ -38,7 +46,7 @@ func (a *arrayHostProvider) Provider() (string, error) { if a.lastFreezeErr != nil { return "", a.lastFreezeErr } 
else { - return "", errors.New("all hosts are frozen") + return "", ErrAllHostsFrozen } } diff --git a/internal/io/compatible.go b/internal/io/compatible.go new file mode 100644 index 00000000..ab0abcbe --- /dev/null +++ b/internal/io/compatible.go @@ -0,0 +1,58 @@ +package io + +import ( + "errors" + "io" +) + +func MakeReadSeekCloserFromReader(r io.Reader) ReadSeekCloser { + return &readSeekCloserFromReader{r: r} +} + +type readSeekCloserFromReader struct { + r io.Reader +} + +func (r *readSeekCloserFromReader) Read(p []byte) (int, error) { + return r.r.Read(p) +} + +func (r *readSeekCloserFromReader) Seek(offset int64, whence int) (int64, error) { + if seeker, ok := r.r.(io.Seeker); ok { + return seeker.Seek(offset, whence) + } + return 0, errors.New("not support seek") +} + +func (r *readSeekCloserFromReader) Close() error { + return nil +} + +func MakeReadSeekCloserFromLimitedReader(r io.Reader, size int64) ReadSeekCloser { + return &sizedReadSeekCloserFromReader{r: io.LimitedReader{R: r, N: size}, size: size} +} + +type sizedReadSeekCloserFromReader struct { + r io.LimitedReader + size int64 +} + +func (r *sizedReadSeekCloserFromReader) Read(p []byte) (int, error) { + return r.r.Read(p) +} + +func (r *sizedReadSeekCloserFromReader) Seek(offset int64, whence int) (int64, error) { + if seeker, ok := r.r.R.(io.ReadSeeker); ok { + newPos, err := seeker.Seek(offset, whence) + if err != nil { + return newPos, err + } + r.r.N = r.size - newPos + return newPos, nil + } + return 0, errors.New("not support seek") +} + +func (r *sizedReadSeekCloserFromReader) Close() error { + return nil +} diff --git a/internal/io/compatible_test.go b/internal/io/compatible_test.go new file mode 100644 index 00000000..406c98f4 --- /dev/null +++ b/internal/io/compatible_test.go @@ -0,0 +1,97 @@ +//go:build unit +// +build unit + +package io_test + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "testing" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" +) + +func 
TestMakeReadSeekCloserFromReader(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + for i := 0; i < 16; i++ { + if _, err = file.Write([]byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}); err != nil { + t.Fatal(err) + } + } + if _, err = file.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + + buf := make([]byte, 16) + + reader := internal_io.MakeReadSeekCloserFromReader(file) + if n, err := reader.Read(buf); err != nil { + t.Fatal(err) + } else if !bytes.Equal(buf[:n], []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}) { + t.Fatal("unexpected read content") + } + if _, err = reader.Seek(8, io.SeekStart); err != nil { + t.Fatal(err) + } + if n, err := reader.Read(buf); err != nil { + t.Fatal(err) + } else if !bytes.Equal(buf[:n], []byte{0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7}) { + t.Fatal("unexpected read content") + } + if _, err = reader.Seek(-8, io.SeekEnd); err != nil { + t.Fatal(err) + } + if n, err := reader.Read(buf); err != nil { + t.Fatal(err) + } else if !bytes.Equal(buf[:n], []byte{0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}) { + t.Fatal("unexpected read content") + } + if err = reader.Close(); err != nil { + t.Fatal(err) + } +} + +func MakeReadSeekCloserFromLimitedReader(t *testing.T) { + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + for i := 0; i < 16; i++ { + if _, err = file.Write([]byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}); err != nil { + t.Fatal(err) + } + } + + buf := make([]byte, 16) + + reader := internal_io.MakeReadSeekCloserFromLimitedReader(file, 16) + if n, err := reader.Read(buf); err != nil { + t.Fatal(err) + } else if !bytes.Equal(buf[:n], []byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 
0xB, 0xC, 0xD, 0xE, 0xF}) { + t.Fatal("unexpected read content") + } + + if _, err = reader.Seek(8, io.SeekStart); err != nil { + t.Fatal(err) + } + if n, err := reader.Read(buf); err != nil { + t.Fatal(err) + } else if !bytes.Equal(buf[:n], []byte{0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}) { + t.Fatal("unexpected read content") + } + + if err = reader.Close(); err != nil { + t.Fatal(err) + } +} diff --git a/internal/io/go1.15.go b/internal/io/go1.15.go new file mode 100644 index 00000000..58eb427b --- /dev/null +++ b/internal/io/go1.15.go @@ -0,0 +1,14 @@ +//go:build !go1.16 +// +build !go1.16 + +package io + +import ( + "io" +) + +type ReadSeekCloser interface { + io.Reader + io.Seeker + io.Closer +} diff --git a/internal/io/go1.16.go b/internal/io/go1.16.go new file mode 100644 index 00000000..6b97ed78 --- /dev/null +++ b/internal/io/go1.16.go @@ -0,0 +1,10 @@ +//go:build go1.16 +// +build go1.16 + +package io + +import ( + "io" +) + +type ReadSeekCloser = io.ReadSeekCloser diff --git a/internal/io/io.go b/internal/io/io.go new file mode 100644 index 00000000..b59783f3 --- /dev/null +++ b/internal/io/io.go @@ -0,0 +1,34 @@ +package io + +import ( + "bytes" + "io" + "io/ioutil" + "strings" +) + +func ReadAll(r io.Reader) ([]byte, error) { + switch b := r.(type) { + case *BytesNopCloser: + _, err := b.Seek(0, io.SeekEnd) + return b.Bytes(), err + default: + return ioutil.ReadAll(r) + } +} + +func SinkAll(r io.Reader) (err error) { + switch b := r.(type) { + case *BytesNopCloser: + _, err = b.Seek(0, io.SeekEnd) + case *bytes.Buffer: + b.Truncate(0) + case *bytes.Reader: + _, err = b.Seek(0, io.SeekEnd) + case *strings.Reader: + _, err = b.Seek(0, io.SeekEnd) + default: + _, err = io.Copy(ioutil.Discard, r) + } + return +} diff --git a/internal/io/io_test.go b/internal/io/io_test.go new file mode 100644 index 00000000..3f77acd6 --- /dev/null +++ b/internal/io/io_test.go @@ -0,0 +1,91 @@ +//go:build unit +// +build unit + +package io_test + +import ( + "bytes" + "io" + 
"io/ioutil" + "os" + "testing" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" +) + +func TestReadAll(t *testing.T) { + runTestCase := func(t *testing.T, r io.Reader, expected []byte) { + if b, err := internal_io.ReadAll(r); err != nil { + t.Fatal(err) + } else if !bytes.Equal(expected, b) { + t.Fatalf("unexpected read content: b=%#v, expected=%#v", b, expected) + } else if n, err := r.Read(make([]byte, 1)); err != nil && err != io.EOF { + t.Fatal(err) + } else if n != 0 { + t.Fatal("unexpected read size") + } + } + buf := new(bytes.Buffer) + for i := 0; i < 16; i++ { + if _, err := buf.Write([]byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}); err != nil { + t.Fatal(err) + } + } + expected := buf.Bytes() + runTestCase(t, buf, expected) + + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + if _, err = io.Copy(file, bytes.NewReader(expected)); err != nil { + t.Fatal(err) + } else if _, err = file.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + runTestCase(t, file, expected) + + bytesNopCloser := internal_io.NewBytesNopCloser(expected) + runTestCase(t, bytesNopCloser, expected) +} + +func TestSinkAll(t *testing.T) { + runTestCase := func(t *testing.T, r io.Reader) { + if err := internal_io.SinkAll(r); err != nil { + t.Fatal(err) + } else if n, err := r.Read(make([]byte, 1)); err != nil && err != io.EOF { + t.Fatal(err) + } else if n != 0 { + t.Fatal("unexpected read size") + } + } + + buf := new(bytes.Buffer) + for i := 0; i < 16; i++ { + if _, err := buf.Write([]byte{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xA, 0xB, 0xC, 0xD, 0xE, 0xF}); err != nil { + t.Fatal(err) + } + } + expected := buf.Bytes() + runTestCase(t, buf) + + file, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(file.Name()) + defer file.Close() + + if _, err = io.Copy(file, bytes.NewReader(expected)); err != nil { + 
t.Fatal(err) + } else if _, err = file.Seek(0, io.SeekStart); err != nil { + t.Fatal(err) + } + runTestCase(t, file) + + bytesNopCloser := internal_io.NewBytesNopCloser(expected) + runTestCase(t, bytesNopCloser) +} diff --git a/internal/io/nopcloser.go b/internal/io/nopcloser.go new file mode 100644 index 00000000..663d9577 --- /dev/null +++ b/internal/io/nopcloser.go @@ -0,0 +1,59 @@ +package io + +import ( + "bytes" + "io" +) + +type ReadSeekableNopCloser struct { + r io.ReadSeeker +} + +func NewReadSeekableNopCloser(r io.ReadSeeker) ReadSeekableNopCloser { + return ReadSeekableNopCloser{r: r} +} + +func (nc ReadSeekableNopCloser) Read(p []byte) (int, error) { + return nc.r.Read(p) +} + +func (nc ReadSeekableNopCloser) Seek(offset int64, whence int) (int64, error) { + return nc.r.Seek(offset, whence) +} + +func (nc ReadSeekableNopCloser) Close() error { + return nil +} + +type BytesNopCloser struct { + r *bytes.Reader + b []byte +} + +func NewBytesNopCloser(b []byte) *BytesNopCloser { + return &BytesNopCloser{r: bytes.NewReader(b), b: b} +} + +func (nc *BytesNopCloser) Read(p []byte) (int, error) { + return nc.r.Read(p) +} + +func (nc *BytesNopCloser) ReadAt(b []byte, off int64) (int, error) { + return nc.r.ReadAt(b, off) +} + +func (nc *BytesNopCloser) Seek(offset int64, whence int) (int64, error) { + return nc.r.Seek(offset, whence) +} + +func (nc *BytesNopCloser) Size() int64 { + return nc.r.Size() +} + +func (nc *BytesNopCloser) Close() error { + return nil +} + +func (nc *BytesNopCloser) Bytes() []byte { + return nc.b +} diff --git a/storage/base64_upload.go b/storage/base64_upload.go index e02a10fa..b205f4b2 100644 --- a/storage/base64_upload.go +++ b/storage/base64_upload.go @@ -5,7 +5,6 @@ import ( "context" "encoding/base64" "fmt" - "github.com/qiniu/go-sdk/v7/internal/hostprovider" "hash/crc32" "io" "net/http" @@ -13,6 +12,9 @@ import ( "strings" "time" + "github.com/qiniu/go-sdk/v7/conf" + "github.com/qiniu/go-sdk/v7/internal/hostprovider" + 
"github.com/qiniu/go-sdk/v7/client" ) @@ -159,8 +161,8 @@ func (p *Base64Uploader) put( return doUploadAction(upHostProvider, extra.TryTimes, extra.HostFreezeDuration, func(host string) error { postURL := fmt.Sprintf("%s%s", host, postPath.String()) headers := http.Header{} - headers.Add("Content-Type", "application/octet-stream") - headers.Add("Authorization", "UpToken "+uptoken) + headers.Set("Content-Type", conf.CONTENT_TYPE_OCTET) + headers.Set("Authorization", "UpToken "+uptoken) return p.client.CallWith(ctx, ret, "POST", postURL, headers, bytes.NewReader(base64Data), len(base64Data)) }) diff --git a/storage/bucket.go b/storage/bucket.go index f750e657..9cb1b86d 100644 --- a/storage/bucket.go +++ b/storage/bucket.go @@ -11,11 +11,13 @@ import ( "errors" "fmt" "net/url" - "strconv" "strings" "time" "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/batch_ops" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" "github.com/qiniu/go-sdk/v7/auth" clientv1 "github.com/qiniu/go-sdk/v7/client" @@ -289,46 +291,21 @@ type BucketManagerOptions struct { // BucketManager 提供了对资源进行管理的操作 type BucketManager struct { - Client *clientv1.Client - Mac *auth.Credentials - Cfg *Config - options BucketManagerOptions + Client *clientv1.Client + Mac *auth.Credentials + Cfg *Config + options BucketManagerOptions + apiClient *apis.Storage } // NewBucketManager 用来构建一个新的资源管理对象 func NewBucketManager(mac *auth.Credentials, cfg *Config) *BucketManager { - if cfg == nil { - cfg = &Config{} - } - if cfg.CentralRsHost == "" { - cfg.CentralRsHost = DefaultRsHost - } - - return &BucketManager{ - Client: &clientv1.DefaultClient, - Mac: mac, - Cfg: cfg, - } + return NewBucketManagerEx(mac, cfg, &clientv1.DefaultClient) } // NewBucketManagerEx 用来构建一个新的资源管理对象 func NewBucketManagerEx(mac *auth.Credentials, cfg *Config, clt *clientv1.Client) *BucketManager { - if cfg == nil { - cfg = &Config{} - } - - if clt == nil 
{ - clt = &clientv1.DefaultClient - } - if cfg.CentralRsHost == "" { - cfg.CentralRsHost = DefaultRsHost - } - - return &BucketManager{ - Client: clt, - Mac: mac, - Cfg: cfg, - } + return NewBucketManagerExWithOptions(mac, cfg, clt, BucketManagerOptions{}) } func NewBucketManagerExWithOptions(mac *auth.Credentials, cfg *Config, clt *clientv1.Client, options BucketManagerOptions) *BucketManager { @@ -343,11 +320,25 @@ func NewBucketManagerExWithOptions(mac *auth.Credentials, cfg *Config, clt *clie cfg.CentralRsHost = DefaultRsHost } + opts := http_client.Options{ + HostFreezeDuration: options.HostFreezeDuration, + HostRetryConfig: &clientv2.RetryConfig{ + RetryMax: options.RetryMax, + }, + Credentials: mac, + BasicHTTPClient: clt.Client, + UseInsecureProtocol: !cfg.UseHTTPS, + } + if region := cfg.GetRegion(); region != nil { + opts.Regions = region + } + return &BucketManager{ - Client: clt, - Mac: mac, - Cfg: cfg, - options: options, + Client: clt, + Mac: mac, + Cfg: cfg, + options: options, + apiClient: apis.NewStorage(&opts), } } @@ -365,46 +356,55 @@ func NewBucketManagerExWithOptions(mac *auth.Credentials, cfg *Config, clt *clie // 当文件不存在时,返回612 status code 612 {"error":"no such file or directory"} // 当文件当前状态和设置的状态已经一致,返回400 {"error":"already enabled"}或400 {"error":"already disabled"} func (m *BucketManager) UpdateObjectStatus(bucketName string, key string, enable bool) error { - var status string - ee := EncodedEntry(bucketName, key) + var status int64 if enable { - status = "0" + status = 0 } else { - status = "1" - } - path := fmt.Sprintf("/chstatus/%s/status/%s", ee, status) - - reqHost, reqErr := m.RsReqHost(bucketName) - if reqErr != nil { - return reqErr + status = 1 } - reqURL := fmt.Sprintf("%s%s", reqHost, path) - return m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) + _, err := m.apiClient.ModifyObjectStatus( + context.Background(), + &apis.ModifyObjectStatusRequest{ + Entry: bucketName + ":" + key, 
+ Status: status, + }, + m.makeRequestOptions(), + ) + return err } // CreateBucket 创建一个七牛存储空间 func (m *BucketManager) CreateBucket(bucketName string, regionID RegionID) error { - reqURL := fmt.Sprintf("%s/mkbucketv3/%s/region/%s", getUcHost(m.Cfg.UseHTTPS), bucketName, string(regionID)) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) -} - -// Buckets 用来获取空间列表,如果指定了 shared 参数为 true,那么一同列表被授权访问的空间 -func (m *BucketManager) Buckets(shared bool) (buckets []string, err error) { - reqURL := fmt.Sprintf("%s/buckets?shared=%v", getUcHost(m.Cfg.UseHTTPS), shared) - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, &buckets) - return buckets, err + _, err := m.apiClient.CreateBucket( + context.Background(), + &apis.CreateBucketRequest{ + Bucket: bucketName, + Region: string(regionID), + }, + m.makeRequestOptions(), + ) + return err +} + +// Buckets 用来获取空间列表,如果指定了 shared 参数则额外包含仅被授予了读权限的空间,否则额外包含被授予了读写权限的授权空间 +func (m *BucketManager) Buckets(shared bool) ([]string, error) { + var sharedMode string + if shared { + sharedMode = "rd" + } else { + sharedMode = "rw" + } + response, err := m.apiClient.GetBuckets( + context.Background(), + &apis.GetBucketsRequest{ + Shared: sharedMode, + }, + m.makeRequestOptions(), + ) + if err != nil { + return nil, err + } + return response.BucketNames, nil } // BucketsV4 获取该用户的指定区域内的空间信息,注意该 API 以分页形式返回 Bucket 列表 @@ -412,40 +412,46 @@ func (m *BucketManager) BucketsV4(input *BucketV4Input) (output BucketsV4Output, if input == nil { input = &BucketV4Input{} } - reqURL := fmt.Sprintf("%s/buckets?apiVersion=v4", getUcHost(m.Cfg.UseHTTPS)) - query := make(url.Values) - if input.Region != "" { - query.Add("region", input.Region) - } - 
if input.Limit > 0 { - query.Add("limit", strconv.FormatUint(input.Limit, 10)) - } - if input.Marker != "" { - query.Add("marker", input.Marker) + response, err := m.apiClient.GetBucketsV4( + context.Background(), + &apis.GetBucketsV4Request{ + Region: input.Region, + Limit: int64(input.Limit), + Marker: input.Marker, + }, + m.makeRequestOptions(), + ) + if err != nil { + return } - if len(query) > 0 { - reqURL += "&" + query.Encode() + output.IsTruncated = response.IsTruncated + output.NextMarker = response.NextMarker + output.Buckets = make([]BucketV4Output, 0, len(response.Buckets)) + for _, bucket := range response.Buckets { + ctime, err := time.Parse(time.RFC3339, bucket.CreatedTime) + if err != nil { + return output, err + } + output.Buckets = append(output.Buckets, BucketV4Output{ + Name: bucket.Name, + Region: bucket.Region, + Private: bucket.Private, + Ctime: ctime, + }) } - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, &output) - return output, err + return } // DropBucket 删除七牛存储空间 -func (m *BucketManager) DropBucket(bucketName string) (err error) { - reqURL := fmt.Sprintf("%s/drop/%s", getUcHost(m.Cfg.UseHTTPS), bucketName) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) +func (m *BucketManager) DropBucket(bucketName string) error { + _, err := m.apiClient.DeleteBucket( + context.Background(), + &apis.DeleteBucketRequest{ + Bucket: bucketName, + }, + m.makeRequestOptions(), + ) + return err } // Stat 用来获取一个文件的基本信息 @@ -458,71 +464,83 @@ type StatOpts struct { } // StatWithParts 用来获取一个文件的基本信息以及分片信息 -func (m *BucketManager) StatWithOpts(bucket, key string, opt *StatOpts) (info FileInfo, err error) { - reqHost, reqErr := 
m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URIStat(bucket, key)) - if opt != nil { - if opt.NeedParts { - reqURL += "?needparts=true" - } - } - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &info, "POST", reqURL, nil) - return +func (m *BucketManager) StatWithOpts(bucket, key string, opt *StatOpts) (FileInfo, error) { + if opt == nil { + opt = &StatOpts{} + } + response, err := m.apiClient.StatObject( + context.Background(), + &apis.StatObjectRequest{ + Entry: bucket + ":" + key, + NeedParts: opt.NeedParts, + }, + m.makeRequestOptions(), + ) + if err != nil { + return FileInfo{}, err + } + return FileInfo{ + Fsize: response.Size, + Hash: response.Hash, + MimeType: response.MimeType, + Type: int(response.Type), + PutTime: response.PutTime, + RestoreStatus: int(response.RestoringStatus), + Status: int(response.Status), + Md5: response.Md5, + EndUser: response.EndUser, + MetaData: response.Metadata, + Expiration: response.ExpirationTime, + TransitionToIA: response.TransitionToIaTime, + TransitionToArchive: response.TransitionToArchiveTime, + TransitionToDeepArchive: response.TransitionToDeepArchiveTime, + Parts: response.Parts, + }, nil } // Delete 用来删除空间中的一个文件 -func (m *BucketManager) Delete(bucket, key string) (err error) { - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, URIDelete(bucket, key)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) Delete(bucket, key string) error { + _, err := m.apiClient.DeleteObject( + context.Background(), + &apis.DeleteObjectRequest{ + Entry: bucket + ":" + key, + }, + m.makeRequestOptions(), + ) + return err } // Copy 用来创建已有空间中的文件的一个新的副本 -func (m *BucketManager) Copy(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) { - reqHost, reqErr := 
m.RsReqHost(srcBucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URICopy(srcBucket, srcKey, destBucket, destKey, force)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) Copy(srcBucket, srcKey, destBucket, destKey string, force bool) error { + _, err := m.apiClient.CopyObject( + context.Background(), + &apis.CopyObjectRequest{ + SrcEntry: srcBucket + ":" + srcKey, + DestEntry: destBucket + ":" + destKey, + IsForce: force, + }, + m.makeRequestOptions(), + ) + return err } // Move 用来将空间中的一个文件移动到新的空间或者重命名 -func (m *BucketManager) Move(srcBucket, srcKey, destBucket, destKey string, force bool) (err error) { - reqHost, reqErr := m.RsReqHost(srcBucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URIMove(srcBucket, srcKey, destBucket, destKey, force)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) Move(srcBucket, srcKey, destBucket, destKey string, force bool) error { + _, err := m.apiClient.MoveObject( + context.Background(), + &apis.MoveObjectRequest{ + SrcEntry: srcBucket + ":" + srcKey, + DestEntry: destBucket + ":" + destKey, + IsForce: force, + }, + m.makeRequestOptions(), + ) + return err } // ChangeMime 用来更新文件的MimeType -func (m *BucketManager) ChangeMime(bucket, key, newMime string) (err error) { - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeMime(bucket, key, newMime)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) ChangeMime(bucket, key, newMime string) error { + return m.ChangeMimeAndMeta(bucket, key, newMime, nil) } // ChangeMeta @@ -553,52 +571,60 @@ func (m *BucketManager) ChangeMeta(bucket, key 
string, metas map[string]string) // - key 如果包含了 x-qn-meta- 前缀,则直接使用 key; // - key 如果不包含了 x-qn-meta- 前缀,则内部会为 key 拼接 x-qn-meta- 前缀。 // @return err 错误信息 -func (m *BucketManager) ChangeMimeAndMeta(bucket, key, newMime string, metas map[string]string) (err error) { - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeMimeAndMeta(bucket, key, newMime, metas)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) ChangeMimeAndMeta(bucket, key, newMime string, metas map[string]string) error { + metaData := make(map[string]string, len(metas)) + for k, v := range normalizeMeta(metas) { + metaData[k] = v + } + _, err := m.apiClient.ModifyObjectMetadata( + context.Background(), + &apis.ModifyObjectMetadataRequest{ + Entry: bucket + ":" + key, + MimeType: newMime, + MetaData: metaData, + }, + m.makeRequestOptions(), + ) + return err } // ChangeType 用来更新文件的存储类型,0 表示普通存储,1 表示低频存储,2 表示归档存储,3 表示深度归档存储,4 表示归档直读存储 -func (m *BucketManager) ChangeType(bucket, key string, fileType int) (err error) { - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, URIChangeType(bucket, key, fileType)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) ChangeType(bucket, key string, fileType int) error { + _, err := m.apiClient.SetObjectFileType( + context.Background(), + &apis.SetObjectFileTypeRequest{ + Entry: bucket + ":" + key, + Type: int64(fileType), + }, + m.makeRequestOptions(), + ) + return err } // RestoreAr 解冻归档存储类型的文件,可设置解冻有效期1~7天, 完成解冻任务通常需要1~5分钟 -func (m *BucketManager) RestoreAr(bucket, key string, freezeAfterDays int) (err error) { - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := 
fmt.Sprintf("%s%s", reqHost, URIRestoreAr(bucket, key, freezeAfterDays)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) RestoreAr(bucket, key string, freezeAfterDays int) error { + _, err := m.apiClient.RestoreArchivedObject( + context.Background(), + &apis.RestoreArchivedObjectRequest{ + Entry: bucket + ":" + key, + FreezeAfterDays: int64(freezeAfterDays), + }, + m.makeRequestOptions(), + ) + return err } // DeleteAfterDays 用来更新文件生命周期,如果 days 设置为0,则表示取消文件的定期删除功能,永久存储 -func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) (err error) { - reqHost, reqErr := m.RsReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - - reqURL := fmt.Sprintf("%s%s", reqHost, URIDeleteAfterDays(bucket, key, days)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) DeleteAfterDays(bucket, key string, days int) error { + _, err := m.apiClient.DeleteObjectAfterDays( + context.Background(), + &apis.DeleteObjectAfterDaysRequest{ + Entry: bucket + ":" + key, + DeleteAfterDays: int64(days), + }, + m.makeRequestOptions(), + ) + return err } // Batch 接口提供了资源管理的批量操作,支持 stat,copy,move,delete,chgm,chtype,deleteAfterDays几个接口 @@ -631,44 +657,89 @@ func (m *BucketManager) Batch(operations []string) ([]BatchOpRet, error) { return m.BatchWithContext(context.Background(), bucket, operations) } +func convertToBatchOptRet(op batch_ops.OperationResponse) BatchOpRet { + var ret BatchOpRet + ret.Code = int(op.Code) + opData := op.Data + ret.Data.Fsize = opData.Size + ret.Data.Hash = opData.Hash + ret.Data.MimeType = opData.MimeType + ret.Data.Type = int(opData.Type) + ret.Data.PutTime = opData.PutTime + if restoringStatus := int(opData.RestoringStatus); restoringStatus != 0 { + ret.Data.RestoreStatus = &restoringStatus + } + if status := int(opData.Status); status != 0 { + ret.Data.Status = &status + } 
+ ret.Data.Md5 = opData.Md5 + ret.Data.EndUser = opData.EndUser + if expirationTime := opData.ExpirationTime; expirationTime != 0 { + ret.Data.Expiration = &expirationTime + } + if transitionToIA := opData.TransitionToIaTime; transitionToIA != 0 { + ret.Data.TransitionToIA = &transitionToIA + } + if transitionToArchiveTime := opData.TransitionToArchiveTime; transitionToArchiveTime != 0 { + ret.Data.TransitionToArchive = &transitionToArchiveTime + } + if transitionToDeepArchiveTime := opData.TransitionToDeepArchiveTime; transitionToDeepArchiveTime != 0 { + ret.Data.TransitionToDeepArchive = &transitionToDeepArchiveTime + } + ret.Data.Error = opData.Error + return ret +} + // BatchWithContext 接口提供了资源管理的批量操作,支持 stat,copy,move,delete,chgm,chtype,deleteAfterDays几个接口 // @param ctx context.Context // @param bucket operations 列表中任意一个操作对象所属的 bucket // @param operations 操作对象列表,操作对象所属的 bucket 可能会不同,但是必须属于同一个区域 func (m *BucketManager) BatchWithContext(ctx context.Context, bucket string, operations []string) ([]BatchOpRet, error) { - host, err := m.RsReqHost(bucket) - if err != nil { - return nil, err - } - return m.batchOperation(ctx, host, operations) -} - -func (m *BucketManager) batchOperation(ctx context.Context, reqURL string, operations []string) (batchOpRet []BatchOpRet, err error) { if len(operations) > 1000 { - err = errors.New("batch operation count exceeds the limit of 1000") - return - } - params := map[string][]string{ - "op": operations, + return nil, errors.New("batch operation count exceeds the limit of 1000") } if ctx == nil { ctx = context.Background() } - reqURL = fmt.Sprintf("%s/batch", reqURL) - err = m.Client.CredentialedCallWithForm(ctx, m.Mac, auth.TokenQiniu, &batchOpRet, "POST", reqURL, nil, params) - return + + opts := m.makeRequestOptions() + opts.OverwrittenBucketName = bucket + response, err := m.apiClient.BatchOps( + ctx, + &apis.BatchOpsRequest{ + Operations: operations, + }, + opts, + ) + if err != nil { + return nil, err + } + rets := 
make([]BatchOpRet, 0, len(response.OperationResponses)) + for _, op := range response.OperationResponses { + rets = append(rets, convertToBatchOptRet(op)) + } + return rets, nil } // Fetch 根据提供的远程资源链接来抓取一个文件到空间并已指定文件名保存 -func (m *BucketManager) Fetch(resURL, bucket, key string) (fetchRet FetchRet, err error) { - reqHost, rErr := m.IoReqHost(bucket) - if rErr != nil { - err = rErr - return +func (m *BucketManager) Fetch(resURL, bucket, key string) (FetchRet, error) { + response, err := m.apiClient.FetchObject( + context.Background(), + &apis.FetchObjectRequest{ + FromUrl: resURL, + ToEntry: bucket + ":" + key, + }, + m.makeRequestOptions(), + ) + if err != nil { + return FetchRet{}, err } - reqURL := fmt.Sprintf("%s%s", reqHost, uriFetch(resURL, bucket, key)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &fetchRet, "POST", reqURL, nil) - return + return FetchRet{ + Hash: response.Hash, + Fsize: response.Size, + MimeType: response.MimeType, + Key: response.ObjectName, + }, nil } func (m *BucketManager) RsReqHost(bucket string) (reqHost string, err error) { @@ -745,14 +816,23 @@ func (m *BucketManager) IoReqHost(bucket string) (reqHost string, err error) { // FetchWithoutKey 根据提供的远程资源链接来抓取一个文件到空间并以文件的内容hash作为文件名 func (m *BucketManager) FetchWithoutKey(resURL, bucket string) (fetchRet FetchRet, err error) { - reqHost, rErr := m.IoReqHost(bucket) - if rErr != nil { - err = rErr - return + response, err := m.apiClient.FetchObject( + context.Background(), + &apis.FetchObjectRequest{ + FromUrl: resURL, + ToEntry: bucket, + }, + m.makeRequestOptions(), + ) + if err != nil { + return FetchRet{}, err } - reqURL := fmt.Sprintf("%s%s", reqHost, uriFetchWithoutKey(resURL, bucket)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, &fetchRet, "POST", reqURL, nil) - return + return FetchRet{ + Hash: response.Hash, + Fsize: response.Size, + MimeType: response.MimeType, + Key: response.ObjectName, + }, nil } // 
DomainInfo 是绑定在存储空间上的域名的具体信息 @@ -773,36 +853,36 @@ type DomainInfo struct { func (m *BucketManager) ListBucketDomains(bucket string) (info []DomainInfo, err error) { reqURL := fmt.Sprintf("%s/v3/domains?tbl=%s", getUcHost(m.Cfg.UseHTTPS), bucket) err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: context.Background(), + Method: clientv2.RequestMethodGet, + Url: reqURL, + Header: nil, + GetBody: nil, }, &info) return info, err } // Prefetch 用来同步镜像空间的资源和镜像源资源内容 -func (m *BucketManager) Prefetch(bucket, key string) (err error) { - reqHost, reqErr := m.IoReqHost(bucket) - if reqErr != nil { - err = reqErr - return - } - reqURL := fmt.Sprintf("%s%s", reqHost, uriPrefetch(bucket, key)) - err = m.Client.CredentialedCall(context.Background(), m.Mac, auth.TokenQiniu, nil, "POST", reqURL, nil) - return +func (m *BucketManager) Prefetch(bucket, key string) error { + _, err := m.apiClient.PrefetchObject( + context.Background(), + &apis.PrefetchObjectRequest{ + Entry: bucket + ":" + key, + }, + m.makeRequestOptions(), + ) + return err } // SetImage 用来设置空间镜像源 func (m *BucketManager) SetImage(siteURL, bucket string) (err error) { reqURL := fmt.Sprintf("%s%s", getUcHost(m.Cfg.UseHTTPS), uriSetImage(siteURL, bucket)) return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: context.Background(), + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: nil, }, nil) } @@ -811,11 +891,11 @@ func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err erro reqURL := fmt.Sprintf("%s%s", getUcHost(m.Cfg.UseHTTPS), uriSetImageWithHost(siteURL, bucket, host)) return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - 
Context: context.Background(), - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: context.Background(), + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: nil, }, nil) } @@ -823,11 +903,11 @@ func (m *BucketManager) SetImageWithHost(siteURL, bucket, host string) (err erro func (m *BucketManager) UnsetImage(bucket string) (err error) { reqURL := fmt.Sprintf("%s%s", getUcHost(m.Cfg.UseHTTPS), uriUnsetImage(bucket)) return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: context.Background(), + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: nil, }, nil) } @@ -850,16 +930,29 @@ type AsyncFetchRet struct { } func (m *BucketManager) AsyncFetch(param AsyncFetchParam) (ret AsyncFetchRet, err error) { - - reqUrl, err := m.ApiReqHost(param.Bucket) + response, err := m.apiClient.AsyncFetchObject( + context.Background(), + &apis.AsyncFetchObjectRequest{ + Url: param.Url, + Bucket: param.Bucket, + Host: param.Host, + Key: param.Key, + Md5: param.Md5, + Etag: param.Etag, + CallbackUrl: param.CallbackURL, + CallbackBody: param.CallbackBody, + CallbackBodyType: param.CallbackBodyType, + FileType: int64(param.FileType), + }, + m.makeRequestOptions(), + ) if err != nil { - return + return AsyncFetchRet{}, err } - - reqUrl += "/sisyphus/fetch" - - err = m.Client.CredentialedCallWithJson(context.Background(), m.Mac, auth.TokenQiniu, &ret, "POST", reqUrl, nil, param) - return + return AsyncFetchRet{ + Id: response.Id, + Wait: int(response.QueuedTasksCount), + }, nil } func (m *BucketManager) RsHost(bucket string) (rsHost string, err error) { @@ -929,6 +1022,10 @@ func (m *BucketManager) Zone(bucket string) (z *Zone, err error) { return } +func (m *BucketManager) makeRequestOptions() *apis.Options { + return 
&apis.Options{OverwrittenBucketHosts: getUcEndpoint(m.Cfg.UseHTTPS)} +} + // 构建op的方法,导出的方法支持在Batch操作中使用 // URIStat 构建 stat 接口的请求命令 @@ -995,16 +1092,24 @@ func URIChangeMimeAndMeta(bucket, key, newMime string, metas map[string]string) uri = fmt.Sprintf("%s/mime/%s", uri, base64.URLEncoding.EncodeToString([]byte(newMime))) } if len(metas) > 0 { - for k, v := range metas { - if !strings.HasPrefix(k, "x-qn-meta-") { - k = "x-qn-meta-" + k - } + for k, v := range normalizeMeta(metas) { uri = fmt.Sprintf("%s/%s/%s", uri, k, base64.URLEncoding.EncodeToString([]byte(v))) } } return uri } +func normalizeMeta(metas map[string]string) map[string]string { + newMetas := make(map[string]string, len(metas)) + for k, v := range metas { + if !strings.HasPrefix(k, "x-qn-meta-") { + k = "x-qn-meta-" + k + } + newMetas[k] = v + } + return newMetas +} + // URIChangeType 构建 chtype 接口的请求命令 func URIChangeType(bucket, key string, fileType int) string { return fmt.Sprintf("/chtype/%s/type/%d", EncodedEntry(bucket, key), fileType) @@ -1015,21 +1120,6 @@ func URIRestoreAr(bucket, key string, afterDay int) string { return fmt.Sprintf("/restoreAr/%s/freezeAfterDays/%d", EncodedEntry(bucket, key), afterDay) } -// 构建op的方法,非导出的方法无法用在Batch操作中 -func uriFetch(resURL, bucket, key string) string { - return fmt.Sprintf("/fetch/%s/to/%s", - base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntry(bucket, key)) -} - -func uriFetchWithoutKey(resURL, bucket string) string { - return fmt.Sprintf("/fetch/%s/to/%s", - base64.URLEncoding.EncodeToString([]byte(resURL)), EncodedEntryWithoutKey(bucket)) -} - -func uriPrefetch(bucket, key string) string { - return fmt.Sprintf("/prefetch/%s", EncodedEntry(bucket, key)) -} - func uriSetImage(siteURL, bucket string) string { return fmt.Sprintf("/image/%s/from/%s", bucket, base64.URLEncoding.EncodeToString([]byte(siteURL))) diff --git a/storage/bucket_test.go b/storage/bucket_test.go index 7c4d77cf..9d1e21fd 100644 --- a/storage/bucket_test.go +++ 
b/storage/bucket_test.go @@ -59,6 +59,7 @@ func init() { mac = auth.New(testAK, testSK) cfg := Config{} cfg.UseCdnDomains = false + cfg.UseHTTPS = true bucketManager = NewBucketManagerEx(mac, &cfg, &clt) operationManager = NewOperationManagerEx(mac, &cfg, &clt) formUploader = NewFormUploaderEx(&cfg, &clt) @@ -797,16 +798,23 @@ func TestBucketLifeCycleRule(t *testing.T) { if err != nil { t.Fatalf("TestBucketLifeCycleRule: %v\n", err) } - ruleExists := false - for _, r := range rules { - if r.Name == "golangIntegrationTest" && r.Prefix == "testPutFileKey" && r.DeleteAfterDays == 13 && - r.ToLineAfterDays == 1 && r.ToArchiveIRAfterDays == 2 && r.ToArchiveAfterDays == 6 && r.ToDeepArchiveAfterDays == 10 { - ruleExists = true + var foundRule *BucketLifeCycleRule + for i := range rules { + if rules[i].Name == "golangIntegrationTest" && rules[i].Prefix == "testPutFileKey" { + foundRule = &rules[i] break } } - if !ruleExists { - t.Fatalf("TestBucketLifeCycleRule: %v\n", err) + if foundRule == nil { + t.Fatalf("TestBucketLifeCycleRule: rule name not found") + } else if foundRule.DeleteAfterDays != 13 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.DeleteAfterDays = %d", foundRule.DeleteAfterDays) + } else if foundRule.ToLineAfterDays != 1 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.ToLineAfterDays = %d", foundRule.ToLineAfterDays) + } else if foundRule.ToArchiveAfterDays != 6 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.ToArchiveAfterDays = %d", foundRule.ToArchiveAfterDays) + } else if foundRule.ToDeepArchiveAfterDays != 10 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.ToDeepArchiveAfterDays = %d", foundRule.ToDeepArchiveAfterDays) } err = bucketManager.UpdateBucketLifeCycleRule(testBucket, &BucketLifeCycleRule{ @@ -827,16 +835,23 @@ func TestBucketLifeCycleRule(t *testing.T) { if err != nil { t.Fatalf("TestBucketLifeCycleRule: %v\n", err) } - ruleExists = false - for _, r := range rules { - if r.Name == "golangIntegrationTest" && r.Prefix == "testPutFileKey" 
&& r.DeleteAfterDays == 22 && - r.ToLineAfterDays == 11 && r.ToArchiveIRAfterDays == 12 && r.ToArchiveAfterDays == 16 && r.ToDeepArchiveAfterDays == 20 { - ruleExists = true + foundRule = nil + for i := range rules { + if rules[i].Name == "golangIntegrationTest" && rules[i].Prefix == "testPutFileKey" { + foundRule = &rules[i] break } } - if !ruleExists { - t.Fatalf("TestBucketLifeCycleRule: %v\n", err) + if foundRule == nil { + t.Fatalf("TestBucketLifeCycleRule: rule name not found") + } else if foundRule.DeleteAfterDays != 22 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.DeleteAfterDays = %d", foundRule.DeleteAfterDays) + } else if foundRule.ToLineAfterDays != 11 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.ToLineAfterDays = %d", foundRule.ToLineAfterDays) + } else if foundRule.ToArchiveAfterDays != 16 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.ToArchiveAfterDays = %d", foundRule.ToArchiveAfterDays) + } else if foundRule.ToDeepArchiveAfterDays != 20 { + t.Fatalf("TestBucketLifeCycleRule: foundRule.ToDeepArchiveAfterDays = %d", foundRule.ToDeepArchiveAfterDays) } err = bucketManager.DelBucketLifeCycleRule(testBucket, "golangIntegrationTest") diff --git a/storage/form_upload.go b/storage/form_upload.go index 8cd7e361..c3f09838 100644 --- a/storage/form_upload.go +++ b/storage/form_upload.go @@ -3,23 +3,20 @@ package storage import ( "bytes" "context" - "fmt" - "hash" + "errors" "hash/crc32" "io" - "io/ioutil" - "mime/multipart" - "net/http" - "net/textproto" "os" "path" "path/filepath" "strings" "time" - "github.com/qiniu/go-sdk/v7/internal/hostprovider" - "github.com/qiniu/go-sdk/v7/client" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" ) // PutExtra 为表单上传的额外可选项 @@ -52,10 +49,6 @@ func (extra *PutExtra) init() { } } -func (extra *PutExtra) getUpHost(useHttps bool) string { - return 
hostAddSchemeIfNeeded(useHttps, extra.UpHost) -} - // PutRet 为七牛标准的上传回复内容。 // 如果使用了上传回调或者自定义了returnBody,那么需要根据实际情况,自己自定义一个返回值结构体 type PutRet struct { @@ -66,20 +59,14 @@ type PutRet struct { // FormUploader 表示一个表单上传的对象 type FormUploader struct { - Client *client.Client - Cfg *Config + Client *client.Client + Cfg *Config + storage *apis.Storage } // NewFormUploader 用来构建一个表单上传的对象 func NewFormUploader(cfg *Config) *FormUploader { - if cfg == nil { - cfg = &Config{} - } - - return &FormUploader{ - Client: &client.DefaultClient, - Cfg: cfg, - } + return NewFormUploaderEx(cfg, nil) } // NewFormUploaderEx 用来构建一个表单上传的对象 @@ -91,10 +78,18 @@ func NewFormUploaderEx(cfg *Config, clt *client.Client) *FormUploader { if clt == nil { clt = &client.DefaultClient } + opts := http_client.Options{ + BasicHTTPClient: clt.Client, + UseInsecureProtocol: !cfg.UseHTTPS, + } + if region := cfg.GetRegion(); region != nil { + opts.Regions = region + } return &FormUploader{ - Client: clt, - Cfg: cfg, + Client: clt, + Cfg: cfg, + storage: apis.NewStorage(&opts), } } @@ -170,7 +165,7 @@ func (p *FormUploader) Put( // extra 是上传的一些可选项。详细见 PutExtra 结构的描述。 func (p *FormUploader) PutWithoutKey( ctx context.Context, ret interface{}, uptoken string, data io.Reader, size int64, extra *PutExtra) (err error) { - err = p.put(ctx, ret, uptoken, "", false, data, size, extra, "filename") + err = p.put(ctx, ret, uptoken, "", false, data, size, extra, "") return err } @@ -185,7 +180,7 @@ func (p *FormUploader) put( seekableData, ok := data.(io.ReadSeeker) if !ok { - dataBytes, rErr := ioutil.ReadAll(data) + dataBytes, rErr := internal_io.ReadAll(data) if rErr != nil { return rErr } @@ -200,147 +195,31 @@ func (p *FormUploader) put( func (p *FormUploader) putSeekableData(ctx context.Context, ret interface{}, upToken string, key string, hasKey bool, data io.ReadSeeker, dataSize int64, extra *PutExtra, fileName string) error { - - formFieldBuff := new(bytes.Buffer) - formWriter := 
multipart.NewWriter(formFieldBuff) - // 写入表单头、token、key、fileName 等信息 - if wErr := writeMultipart(formWriter, upToken, key, hasKey, extra, fileName); wErr != nil { - return wErr - } - - // 计算文件 crc32 - crc32Hash := crc32.NewIEEE() - if _, cErr := io.Copy(crc32Hash, data); cErr != nil { - return cErr - } - crcReader := newCrc32Reader(formWriter.Boundary(), crc32Hash) - crcBytes, rErr := ioutil.ReadAll(crcReader) - if rErr != nil { - return rErr - } - crcReader = nil - - // 表单写入文件 crc32 - if _, wErr := formFieldBuff.Write(crcBytes); wErr != nil { - return wErr - } - crcBytes = nil - - formHead := make(textproto.MIMEHeader) - formHead.Set("Content-Disposition", fmt.Sprintf(`form-data; name="file"; filename="%s"`, - escapeQuotes(fileName))) - if extra.MimeType != "" { - formHead.Set("Content-Type", extra.MimeType) - } - if _, cErr := formWriter.CreatePart(formHead); cErr != nil { - return cErr - } - formHead = nil - - // 表单 Fields - formFieldData := formFieldBuff.Bytes() - formFieldBuff = nil - - // 表单最后一行 - formEndLine := []byte(fmt.Sprintf("\r\n--%s--\r\n", formWriter.Boundary())) - - // 不再重新构造 formBody ,避免内存峰值问题 - var formBodyLen int64 = -1 - if dataSize >= 0 { - formBodyLen = int64(len(formFieldData)) + dataSize + int64(len(formEndLine)) - } - - progress := newUploadProgress(extra.OnProgress) - getBodyReader := func() (io.Reader, error) { - if _, err := data.Seek(0, io.SeekStart); err != nil { - return nil, err - } - - var formReader = io.MultiReader(bytes.NewReader(formFieldData), data, bytes.NewReader(formEndLine)) - if extra.OnProgress != nil { - formReader = &readerWithProgress{reader: formReader, fsize: formBodyLen, onProgress: progress.onProgress} - } - return formReader, nil - } - getBodyReadCloser := func() (io.ReadCloser, error) { - reader, err := getBodyReader() - if err != nil { - return nil, err - } - return ioutil.NopCloser(reader), nil - } - - var err error - var hostProvider hostprovider.HostProvider = nil - if extra.UpHost != "" { - hostProvider = 
hostprovider.NewWithHosts([]string{extra.getUpHost(p.Cfg.UseHTTPS)}) - } else { - hostProvider, err = p.getUpHostProviderFromUploadToken(upToken, extra) - if err != nil { - return err - } - } - - // 上传 - contentType := formWriter.FormDataContentType() - headers := http.Header{} - headers.Add("Content-Type", contentType) - err = doUploadAction(hostProvider, extra.TryTimes, extra.HostFreezeDuration, func(host string) error { - reader, gErr := getBodyReader() - if gErr != nil { - return gErr - } - - return p.Client.CallWithBodyGetter(ctx, ret, "POST", host, headers, reader, getBodyReadCloser, formBodyLen) - }) - if err != nil { - return err + if fileName == "" { + fileName = "Untitled" } + var fileReader io.Reader = data if extra.OnProgress != nil { - extra.OnProgress(formBodyLen, formBodyLen) + fileReader = &readerWithProgress{reader: data, fsize: dataSize, onProgress: extra.OnProgress} } - return nil -} - -func (p *FormUploader) getUpHostProviderFromUploadToken(upToken string, extra *PutExtra) (hostprovider.HostProvider, error) { - ak, bucket, err := getAkBucketFromUploadToken(upToken) - if err != nil { - return nil, err - } - return getUpHostProvider(p.Cfg, extra.TryTimes, extra.HostFreezeDuration, ak, bucket) -} - -type crc32Reader struct { - h hash.Hash32 - boundary string - r io.Reader - inited bool - nlDashBoundaryNl string - header string - crc32PadLen int64 -} - -func newCrc32Reader(boundary string, h hash.Hash32) *crc32Reader { - nlDashBoundaryNl := fmt.Sprintf("\r\n--%s\r\n", boundary) - header := `Content-Disposition: form-data; name="crc32"` + "\r\n\r\n" - return &crc32Reader{ - h: h, - boundary: boundary, - nlDashBoundaryNl: nlDashBoundaryNl, - header: header, - crc32PadLen: 10, + request := apis.PostObjectRequest{ + ObjectName: makeKeyForUploading(key, hasKey), + UploadToken: uptoken.NewParser(upToken), + File: http_client.MultipartFormBinaryData{ + Data: internal_io.MakeReadSeekCloserFromLimitedReader(fileReader, dataSize), + Name: fileName, + }, + 
CustomData: makeCustomData(extra.Params), + ResponseBody: ret, } -} - -func (r *crc32Reader) Read(p []byte) (int, error) { - if !r.inited { - crc32Sum := r.h.Sum32() - crc32Line := r.nlDashBoundaryNl + r.header + fmt.Sprintf("%010d", crc32Sum) //padding crc32 results to 10 digits - r.r = strings.NewReader(crc32Line) - r.inited = true + if crc32, ok, err := crc32FromReader(data); err != nil { + return err + } else if ok { + request.Crc32 = int64(crc32) } - return r.r.Read(p) + _, err := p.storage.PostObject(ctx, &request, makeApiOptionsFromUpHost(extra.UpHost)) + return err } func (p *FormUploader) UpHost(ak, bucket string) (upHost string, err error) { @@ -367,38 +246,43 @@ func (p *readerWithProgress) Read(b []byte) (n int, err error) { return } -func writeMultipart(writer *multipart.Writer, uptoken, key string, hasKey bool, - extra *PutExtra, fileName string) (err error) { - - //token - if err = writer.WriteField("token", uptoken); err != nil { - return - } - - //key - if hasKey { - if err = writer.WriteField("key", key); err != nil { - return +func (p *readerWithProgress) Seek(offset int64, whence int) (int64, error) { + if seeker, ok := p.reader.(io.Seeker); ok { + pos, err := seeker.Seek(offset, whence) + if err != nil { + return pos, err } + p.uploaded = pos + p.onProgress(p.fsize, p.uploaded) + return pos, nil } + return 0, errors.New("resource not support seek") +} - //extra.Params - if extra.Params != nil { - for k, v := range extra.Params { - if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { - err = writer.WriteField(k, v) - if err != nil { - return - } - } +func makeCustomData(params map[string]string) map[string]string { + customData := make(map[string]string, len(params)) + for k, v := range params { + if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { + customData[k] = v } } - - return err + return customData } -var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"") - -func 
escapeQuotes(s string) string { - return quoteEscaper.Replace(s) +func crc32FromReader(r io.Reader) (uint32, bool, error) { + if readSeeker, ok := r.(io.ReadSeeker); ok { + _, err := readSeeker.Seek(0, io.SeekStart) + if err != nil { + return 0, false, err + } + hasher := crc32.NewIEEE() + if _, err = io.Copy(hasher, readSeeker); err != nil { + return 0, false, err + } + if _, err = readSeeker.Seek(0, io.SeekStart); err != nil { + return 0, false, err + } + return hasher.Sum32(), true, nil + } + return 0, false, nil } diff --git a/storage/region.go b/storage/region.go index dc21f3f0..6b34958b 100644 --- a/storage/region.go +++ b/storage/region.go @@ -10,6 +10,9 @@ import ( "github.com/qiniu/go-sdk/v7/client" "github.com/qiniu/go-sdk/v7/internal/clientv2" "github.com/qiniu/go-sdk/v7/internal/hostprovider" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region_v2 "github.com/qiniu/go-sdk/v7/storagev2/region" ) // 存储所在的地区,例如华东,华南,华北 @@ -95,6 +98,28 @@ func (r *Region) GetApiHost(useHttps bool) string { return endpoint(useHttps, r.ApiHost) } +func (r *Region) GetRegions(ctx context.Context) ([]*region_v2.Region, error) { + newRegion := ®ion_v2.Region{ + Up: region_v2.Endpoints{Preferred: append(r.CdnUpHosts, r.SrcUpHosts...)}, + } + if host := r.IovipHost; host != "" { + newRegion.Io = region_v2.Endpoints{Preferred: []string{host}} + } + if host := r.IoSrcHost; host != "" { + newRegion.IoSrc = region_v2.Endpoints{Preferred: []string{host}} + } + if host := r.RsHost; host != "" { + newRegion.Rs = region_v2.Endpoints{Preferred: []string{host}} + } + if host := r.RsfHost; host != "" { + newRegion.Rsf = region_v2.Endpoints{Preferred: []string{host}} + } + if host := r.ApiHost; host != "" { + newRegion.Api = region_v2.Endpoints{Preferred: []string{host}} + } + return []*region_v2.Region{newRegion}, nil +} + var ( // regionHuadong 表示华东机房 regionHuadong = Region{ @@ -265,6 +290,23 @@ func getUcBackupHosts() []string { return 
hosts } +func getUcEndpoint(useHttps bool) region_v2.EndpointsProvider { + ucHosts := make([]string, 0, 1+len(ucHosts)) + if len(UcHost) > 0 { + ucHosts = append(ucHosts, endpoint(useHttps, UcHost)) + } + for _, host := range ucHosts { + if len(host) > 0 { + ucHosts = append(ucHosts, endpoint(useHttps, host)) + } + } + if len(ucHosts) > 0 { + return region_v2.Endpoints{Preferred: ucHosts} + } else { + return nil + } +} + // GetRegion 用来根据ak和bucket来获取空间相关的机房信息 // 延用 v2, v2 结构和 v4 结构不同且暂不可替代 // Deprecated 使用 GetRegionWithOptions 替换 @@ -305,28 +347,27 @@ func GetRegionsInfo(mac *auth.Credentials) ([]RegionInfo, error) { } func GetRegionsInfoWithOptions(mac *auth.Credentials, options UCApiOptions) ([]RegionInfo, error) { - var regions struct { - Regions []RegionInfo `json:"regions"` - } - - reqUrl := getUcHost(options.UseHttps) + "/regions" - c := getUCClient(ucClientConfig{ - IsUcQueryApi: false, - RetryMax: options.RetryMax, + response, err := apis.NewStorage(&http_client.Options{ HostFreezeDuration: options.HostFreezeDuration, - }, mac) - qErr := clientv2.DoAndDecodeJsonResponse(c, clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodGet, - Url: reqUrl, - Header: nil, - BodyCreator: nil, - }, ®ions) - if qErr != nil { - return nil, fmt.Errorf("query region error, %s", qErr.Error()) - } else { - return regions.Regions, nil + HostRetryConfig: &clientv2.RetryConfig{ + RetryMax: options.RetryMax, + }, + }).GetRegions( + context.Background(), + &apis.GetRegionsRequest{Credentials: mac}, + &apis.Options{OverwrittenBucketHosts: getUcEndpoint(options.UseHttps)}, + ) + if err != nil { + return nil, err + } + regions := make([]RegionInfo, 0, len(response.Regions)) + for _, region := range response.Regions { + regions = append(regions, RegionInfo{ + ID: region.Id, + Description: region.Description, + }) } + return regions, nil } type ucClientConfig struct { @@ -376,7 +417,7 @@ func getUCClient(config ucClientConfig, mac *auth.Credentials) 
clientv2.Client { if mac != nil { is = append(is, clientv2.NewAuthInterceptor(clientv2.AuthConfig{ - Credentials: *mac, + Credentials: mac, TokenType: auth.TokenQiniu, })) } diff --git a/storage/region_uc_v2.go b/storage/region_uc_v2.go index 2e0b5505..9e6678a1 100644 --- a/storage/region_uc_v2.go +++ b/storage/region_uc_v2.go @@ -256,11 +256,11 @@ func getRegionByV2(ak, bucket string, options UCApiOptions) (*Region, error) { HostFreezeDuration: options.HostFreezeDuration, }, nil) err := clientv2.DoAndDecodeJsonResponse(c, clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: context.Background(), + Method: clientv2.RequestMethodGet, + Url: reqURL, + Header: nil, + GetBody: nil, }, &ret) if err != nil { return nil, fmt.Errorf("query region error, %s", err.Error()) diff --git a/storage/region_uc_v4.go b/storage/region_uc_v4.go index 95c55b7f..0f8bac4e 100644 --- a/storage/region_uc_v4.go +++ b/storage/region_uc_v4.go @@ -156,11 +156,11 @@ func getRegionByV4(ak, bucket string, options UCApiOptions) (*RegionGroup, error HostFreezeDuration: options.HostFreezeDuration, }, nil) err := clientv2.DoAndDecodeJsonResponse(c, clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: context.Background(), + Method: clientv2.RequestMethodGet, + Url: reqURL, + Header: nil, + GetBody: nil, }, &ret) if err != nil { return nil, fmt.Errorf("query region error, %s", err.Error()) diff --git a/storage/resume_uploader.go b/storage/resume_uploader.go index 30ab891c..5795d31b 100644 --- a/storage/resume_uploader.go +++ b/storage/resume_uploader.go @@ -12,13 +12,17 @@ import ( "sync" "github.com/qiniu/go-sdk/v7/client" - "github.com/qiniu/go-sdk/v7/internal/hostprovider" + "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + 
"github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" ) // ResumeUploader 表示一个分片上传的对象 type ResumeUploader struct { - Client *client.Client - Cfg *Config + Client *client.Client + Cfg *Config + storage *apis.Storage } // NewResumeUploader 表示构建一个新的分片上传的对象 @@ -36,9 +40,18 @@ func NewResumeUploaderEx(cfg *Config, clt *client.Client) *ResumeUploader { clt = &client.DefaultClient } + opts := http_client.Options{ + BasicHTTPClient: clt.Client, + UseInsecureProtocol: !cfg.UseHTTPS, + } + if region := cfg.GetRegion(); region != nil { + opts.Regions = region + } + return &ResumeUploader{ - Client: clt, - Cfg: cfg, + Client: clt, + Cfg: cfg, + storage: apis.NewStorage(&opts), } } @@ -122,31 +135,17 @@ func (p *ResumeUploader) rput(ctx context.Context, ret interface{}, upToken stri extra.init() var ( - accessKey, bucket, recorderKey string - fileInfo os.FileInfo = nil - hostProvider hostprovider.HostProvider = nil + recorderKey string + fileInfo os.FileInfo = nil ) if fileDetails != nil { fileInfo = fileDetails.fileInfo } - if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { - return - } - - if extra.UpHost != "" { - hostProvider = hostprovider.NewWithHosts([]string{extra.getUpHost(p.Cfg.UseHTTPS)}) - } else { - hostProvider, err = p.resumeUploaderAPIs().upHostProvider(accessKey, bucket, extra.TryTimes, extra.HostFreezeDuration) - if err != nil { - return - } - } - recorderKey = getRecorderKey(extra.Recorder, upToken, key, "v1", blockSize, fileDetails) return uploadByWorkers( - newResumeUploaderImpl(p, key, hasKey, upToken, hostProvider, fileInfo, extra, ret, recorderKey), + newResumeUploaderImpl(p, key, hasKey, upToken, makeEndpointsFromUpHost(extra.UpHost), fileInfo, extra, ret, recorderKey), ctx, newSizedChunkReader(f, fsize, blockSize)) } @@ -156,26 +155,8 @@ func (p *ResumeUploader) rputWithoutSize(ctx context.Context, ret interface{}, u } extra.init() - var ( - accessKey, bucket string - hostProvider 
hostprovider.HostProvider = nil - ) - - if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { - return - } - - if extra.UpHost != "" { - hostProvider = hostprovider.NewWithHosts([]string{extra.getUpHost(p.Cfg.UseHTTPS)}) - } else { - hostProvider, err = p.resumeUploaderAPIs().upHostProvider(accessKey, bucket, extra.TryTimes, extra.HostFreezeDuration) - if err != nil { - return - } - } - return uploadByWorkers( - newResumeUploaderImpl(p, key, hasKey, upToken, hostProvider, nil, extra, ret, ""), + newResumeUploaderImpl(p, key, hasKey, upToken, makeEndpointsFromUpHost(extra.UpHost), nil, extra, ret, ""), ctx, newUnsizedChunkReader(r, 1< 0 { + opts.HostRetryConfig = &clientv2.RetryConfig{ + RetryMax: extra.TryTimes, + } + } + if extra.HostFreezeDuration > 0 { + opts.HostFreezeDuration = extra.HostFreezeDuration + } + } return &resumeUploaderImpl{ - client: resumeUploader.Client, - cfg: resumeUploader.Cfg, - key: key, - hasKey: hasKey, - upToken: upToken, - upHostProvider: upHostProvider, - bufPool: &sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, extra.ChunkSize)) - }, - }, + cfg: resumeUploader.Cfg, + key: key, + hasKey: hasKey, + upToken: upToken, + upEndpoints: upEndpoints, extra: extra, ret: ret, fileSize: 0, fileInfo: fileInfo, recorderKey: recorderKey, + storage: apis.NewStorage(&opts), + bufPool: &sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, extra.ChunkSize)) + }, + }, } } @@ -332,34 +330,17 @@ func (impl *resumeUploaderImpl) uploadChunk(ctx context.Context, c chunk) error seekableData := bytes.NewReader(buffer.Bytes()) if chunkOffset == 0 { - err = doUploadAction(impl.upHostProvider, impl.extra.TryTimes, impl.extra.HostFreezeDuration, func(host string) error { - if _, sErr := seekableData.Seek(0, io.SeekStart); sErr != nil { - return sErr - } - - if e := apis.mkBlk(ctx, impl.upToken, host, &blkPutRet, c.size, seekableData, realChunkSize); e != nil { - return e - } + if 
err = apis.mkBlk(ctx, impl.upToken, impl.upEndpoints, &blkPutRet, c.size, seekableData, realChunkSize); err == nil { if blkPutRet.Crc32 != crc32Value || int64(blkPutRet.Offset) != chunkOffset+realChunkSize { return ErrUnmatchedChecksum } - return nil - }) + } } else { - err = doUploadAction(impl.upHostProvider, impl.extra.TryTimes, impl.extra.HostFreezeDuration, func(host string) error { - blkPutRet.Host = host - if _, sErr := seekableData.Seek(0, io.SeekStart); sErr != nil { - return sErr - } - - if e := apis.bput(ctx, impl.upToken, &blkPutRet, seekableData, realChunkSize); e != nil { - return e - } + if err = apis.bput(ctx, impl.upToken, impl.upEndpoints, &blkPutRet, seekableData, realChunkSize); err == nil { if blkPutRet.Crc32 != crc32Value || int64(blkPutRet.Offset) != chunkOffset+realChunkSize { return ErrUnmatchedChecksum } - return nil - }) + } } if err != nil { @@ -398,9 +379,7 @@ func (impl *resumeUploaderImpl) final(ctx context.Context) error { } sort.Sort(blkputRets(impl.extra.Progresses)) - err := doUploadAction(impl.upHostProvider, impl.extra.TryTimes, impl.extra.HostFreezeDuration, func(host string) error { - return impl.resumeUploaderAPIs().mkfile(ctx, impl.upToken, host, impl.ret, impl.key, impl.hasKey, impl.fileSize, impl.extra) - }) + err := impl.resumeUploaderAPIs().mkfile(ctx, impl.upToken, impl.upEndpoints, impl.ret, impl.key, impl.hasKey, impl.fileSize, impl.extra) impl.deleteUploadRecordIfNeed(err, false) return err } @@ -472,5 +451,5 @@ func (impl *resumeUploaderImpl) save(ctx context.Context) { } func (impl *resumeUploaderImpl) resumeUploaderAPIs() *resumeUploaderAPIs { - return &resumeUploaderAPIs{Client: impl.client, Cfg: impl.cfg} + return &resumeUploaderAPIs{cfg: impl.cfg, storage: impl.storage} } diff --git a/storage/resume_uploader_apis.go b/storage/resume_uploader_apis.go index ab144165..936a834e 100644 --- a/storage/resume_uploader_apis.go +++ b/storage/resume_uploader_apis.go @@ -2,22 +2,20 @@ package storage import ( "context" - 
"encoding/base64" "io" - "net/http" - "strconv" "strings" "time" - "github.com/qiniu/go-sdk/v7/internal/hostprovider" - - "github.com/qiniu/go-sdk/v7/client" - "github.com/qiniu/go-sdk/v7/conf" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_complete_multipart_upload" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" ) type resumeUploaderAPIs struct { - Client *client.Client - Cfg *Config + cfg *Config + storage *apis.Storage } // BlkputRet 表示分片上传每个片上传完毕的返回值 @@ -33,16 +31,59 @@ type BlkputRet struct { blkIdx int } -func (p *resumeUploaderAPIs) mkBlk(ctx context.Context, upToken, upHost string, ret *BlkputRet, blockSize int64, body io.Reader, size int64) error { - reqUrl := upHost + "/mkblk/" + strconv.FormatInt(blockSize, 10) - - return p.Client.CallWith64(ctx, ret, "POST", reqUrl, makeHeadersForUpload(upToken), body, size) +func (p *resumeUploaderAPIs) mkBlk( + ctx context.Context, upToken string, upEndpoints region.EndpointsProvider, + ret *BlkputRet, blockSize int64, body io.Reader, size int64, +) error { + response, err := p.storage.ResumableUploadV1MakeBlock( + ctx, + &apis.ResumableUploadV1MakeBlockRequest{ + BlockSize: blockSize, + UpToken: uptoken.NewParser(upToken), + Body: internal_io.MakeReadSeekCloserFromReader(body), + }, + makeApiOptionsFromUpEndpoints(upEndpoints), + ) + if err != nil { + return err + } + *ret = BlkputRet{ + Ctx: response.Ctx, + Checksum: response.Checksum, + Crc32: uint32(response.Crc32), + Offset: uint32(response.Offset), + Host: response.Host, + ExpiredAt: response.ExpiredAt, + } + return nil } -func (p *resumeUploaderAPIs) bput(ctx context.Context, upToken string, ret *BlkputRet, body io.Reader, size int64) error { - reqUrl := ret.Host + "/bput/" + ret.Ctx + "/" + strconv.FormatUint(uint64(ret.Offset), 10) - - return p.Client.CallWith64(ctx, ret, "POST", reqUrl, 
makeHeadersForUpload(upToken), body, size) +func (p *resumeUploaderAPIs) bput( + ctx context.Context, upToken string, upEndpoints region.EndpointsProvider, + ret *BlkputRet, body io.Reader, size int64, +) error { + response, err := p.storage.ResumableUploadV1Bput( + ctx, + &apis.ResumableUploadV1BputRequest{ + Ctx: ret.Ctx, + ChunkOffset: int64(ret.Offset), + UpToken: uptoken.NewParser(upToken), + Body: internal_io.MakeReadSeekCloserFromReader(body), + }, + makeApiOptionsFromUpEndpoints(upEndpoints), + ) + if err != nil { + return err + } + *ret = BlkputRet{ + Ctx: response.Ctx, + Checksum: response.Checksum, + Crc32: uint32(response.Crc32), + Offset: uint32(response.Offset), + Host: response.Host, + ExpiredAt: response.ExpiredAt, + } + return nil } // RputExtra 表示分片上传额外可以指定的参数 @@ -81,32 +122,31 @@ func (extra *RputExtra) init() { } } -func (extra *RputExtra) getUpHost(useHttps bool) string { - return hostAddSchemeIfNeeded(useHttps, extra.UpHost) -} - -func (p *resumeUploaderAPIs) mkfile(ctx context.Context, upToken, upHost string, ret interface{}, key string, hasKey bool, fsize int64, extra *RputExtra) (err error) { - url := upHost + "/mkfile/" + strconv.FormatInt(fsize, 10) +func (p *resumeUploaderAPIs) mkfile( + ctx context.Context, upToken string, upEndpoints region.EndpointsProvider, + ret interface{}, key string, hasKey bool, fsize int64, extra *RputExtra, +) error { if extra == nil { extra = &RputExtra{} } - if extra.MimeType != "" { - url += "/mimeType/" + encode(extra.MimeType) - } - if hasKey { - url += "/key/" + encode(key) - } - for k, v := range extra.Params { - if (strings.HasPrefix(k, "x:") || strings.HasPrefix(k, "x-qn-meta-")) && v != "" { - url += "/" + k + "/" + encode(v) - } - } ctxs := make([]string, len(extra.Progresses)) for i, progress := range extra.Progresses { ctxs[i] = progress.Ctx } - buf := strings.Join(ctxs, ",") - return p.Client.CallWith(ctx, ret, "POST", url, makeHeadersForUpload(upToken), strings.NewReader(buf), len(buf)) + _, err 
:= p.storage.ResumableUploadV1MakeFile( + ctx, + &apis.ResumableUploadV1MakeFileRequest{ + Size: fsize, + ObjectName: makeKeyForUploading(key, hasKey), + MimeType: extra.MimeType, + CustomData: makeCustomData(extra.Params), + UpToken: uptoken.NewParser(upToken), + Body: internal_io.MakeReadSeekCloserFromReader(strings.NewReader(strings.Join(ctxs, ","))), + ResponseBody: ret, + }, + makeApiOptionsFromUpEndpoints(upEndpoints), + ) + return err } // InitPartsRet 表示分片上传 v2 初始化完毕的返回值 @@ -115,10 +155,27 @@ type InitPartsRet struct { ExpireAt int64 `json:"expireAt"` } -func (p *resumeUploaderAPIs) initParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, ret *InitPartsRet) error { - reqUrl := upHost + "/buckets/" + bucket + "/objects/" + encodeV2(key, hasKey) + "/uploads" - - return p.Client.CallWith(ctx, ret, "POST", reqUrl, makeHeadersForUploadEx(upToken, ""), nil, 0) +func (p *resumeUploaderAPIs) initParts( + ctx context.Context, upToken string, upEndpoints region.EndpointsProvider, + bucket, key string, hasKey bool, ret *InitPartsRet, +) error { + response, err := p.storage.ResumableUploadV2InitiateMultipartUpload( + ctx, + &apis.ResumableUploadV2InitiateMultipartUploadRequest{ + BucketName: bucket, + ObjectName: makeKeyForUploading(key, hasKey), + UpToken: uptoken.NewParser(upToken), + }, + makeApiOptionsFromUpEndpoints(upEndpoints), + ) + if err != nil { + return err + } + *ret = InitPartsRet{ + UploadID: response.UploadId, + ExpireAt: response.ExpiredAt, + } + return nil } // UploadPartsRet 表示分片上传 v2 每个片上传完毕的返回值 @@ -127,10 +184,31 @@ type UploadPartsRet struct { MD5 string `json:"md5"` } -func (p *resumeUploaderAPIs) uploadParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, uploadId string, partNumber int64, partMD5 string, ret *UploadPartsRet, body io.Reader, size int64) error { - reqUrl := upHost + "/buckets/" + bucket + "/objects/" + encodeV2(key, hasKey) + "/uploads/" + uploadId + "/" + 
strconv.FormatInt(partNumber, 10) - - return p.Client.CallWith64(ctx, ret, "PUT", reqUrl, makeHeadersForUploadPart(upToken, partMD5), body, size) +func (p *resumeUploaderAPIs) uploadParts( + ctx context.Context, upToken string, upEndpoints region.EndpointsProvider, + bucket, key string, hasKey bool, uploadId string, partNumber int64, partMD5 string, ret *UploadPartsRet, body io.Reader, size int64, +) error { + response, err := p.storage.ResumableUploadV2UploadPart( + ctx, + &apis.ResumableUploadV2UploadPartRequest{ + BucketName: bucket, + ObjectName: makeKeyForUploading(key, hasKey), + UploadId: uploadId, + PartNumber: partNumber, + Md5: partMD5, + UpToken: uptoken.NewParser(upToken), + Body: internal_io.MakeReadSeekCloserFromLimitedReader(body, size), + }, + makeApiOptionsFromUpEndpoints(upEndpoints), + ) + if err != nil { + return err + } + *ret = UploadPartsRet{ + Etag: response.Etag, + MD5: response.Md5, + } + return nil } type UploadPartInfo struct { @@ -173,83 +251,70 @@ func (extra *RputV2Extra) init() { } } -func (extra *RputV2Extra) getUpHost(useHttps bool) string { - return hostAddSchemeIfNeeded(useHttps, extra.UpHost) -} - -func hostAddSchemeIfNeeded(useHttps bool, host string) string { - if host == "" { - return "" - } else if strings.Contains(host, "://") { - return host - } else { - return endpoint(useHttps, host) - } -} - -func (p *resumeUploaderAPIs) completeParts(ctx context.Context, upToken, upHost string, ret interface{}, bucket, key string, hasKey bool, uploadId string, extra *RputV2Extra) (err error) { - type CompletePartBody struct { - Parts []UploadPartInfo `json:"parts"` - MimeType string `json:"mimeType,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` - CustomVars map[string]string `json:"customVars,omitempty"` - } - if extra == nil { - extra = &RputV2Extra{} - } - completePartBody := CompletePartBody{ - Parts: extra.Progresses, - MimeType: extra.MimeType, - Metadata: extra.Metadata, - CustomVars: 
make(map[string]string), +func (p *resumeUploaderAPIs) completeParts( + ctx context.Context, upToken string, upEndpoints region.EndpointsProvider, ret interface{}, + bucket, key string, hasKey bool, uploadId string, extra *RputV2Extra, +) error { + parts := make([]resumable_upload_v2_complete_multipart_upload.PartInfo, 0, len(extra.Progresses)) + for i := range extra.Progresses { + parts = append(parts, resumable_upload_v2_complete_multipart_upload.PartInfo{ + PartNumber: extra.Progresses[i].PartNumber, + Etag: extra.Progresses[i].Etag, + }) } + customVars := make(map[string]string, len(extra.CustomVars)) for k, v := range extra.CustomVars { if strings.HasPrefix(k, "x:") && v != "" { - completePartBody.CustomVars[k] = v + customVars[k] = v } } - - reqUrl := upHost + "/buckets/" + bucket + "/objects/" + encodeV2(key, hasKey) + "/uploads/" + uploadId - - return p.Client.CallWithJson(ctx, ret, "POST", reqUrl, makeHeadersForUploadEx(upToken, conf.CONTENT_TYPE_JSON), &completePartBody) + _, err := p.storage.ResumableUploadV2CompleteMultipartUpload( + ctx, + &apis.ResumableUploadV2CompleteMultipartUploadRequest{ + BucketName: bucket, + ObjectName: makeKeyForUploading(key, hasKey), + UploadId: uploadId, + UpToken: uptoken.NewParser(upToken), + Parts: parts, + MimeType: extra.MimeType, + Metadata: extra.Metadata, + CustomVars: customVars, + ResponseBody: ret, + }, + makeApiOptionsFromUpEndpoints(upEndpoints), + ) + return err } func (p *resumeUploaderAPIs) upHost(ak, bucket string) (upHost string, err error) { - return getUpHost(p.Cfg, 0, 0, ak, bucket) + return getUpHost(p.cfg, 0, 0, ak, bucket) } -func (p *resumeUploaderAPIs) upHostProvider(ak, bucket string, retryMax int, hostFreezeDuration time.Duration) (hostProvider hostprovider.HostProvider, err error) { - return getUpHostProvider(p.Cfg, retryMax, hostFreezeDuration, ak, bucket) -} - -func makeHeadersForUpload(upToken string) http.Header { - return makeHeadersForUploadEx(upToken, conf.CONTENT_TYPE_OCTET) -} - -func 
makeHeadersForUploadPart(upToken, partMD5 string) http.Header { - headers := makeHeadersForUpload(upToken) - headers.Add("Content-MD5", partMD5) - return headers +func makeEndpointsFromUpHost(upHost string) region.EndpointsProvider { + if upHost != "" { + return ®ion.Endpoints{Preferred: []string{upHost}} + } + return nil } -func makeHeadersForUploadEx(upToken, contentType string) http.Header { - headers := http.Header{} - if contentType != "" { - headers.Add("Content-Type", contentType) +func makeApiOptionsFromUpEndpoints(upEndpoints region.EndpointsProvider) *apis.Options { + if upEndpoints != nil { + return &apis.Options{ + OverwrittenEndpoints: upEndpoints, + } } - headers.Add("Authorization", "UpToken "+upToken) - return headers + return nil } -func encode(raw string) string { - return base64.URLEncoding.EncodeToString([]byte(raw)) +func makeApiOptionsFromUpHost(upHost string) *apis.Options { + return makeApiOptionsFromUpEndpoints(makeEndpointsFromUpHost(upHost)) } -func encodeV2(key string, hasKey bool) string { - if !hasKey { - return "~" +func makeKeyForUploading(key string, hasKey bool) *string { + if hasKey { + return &key } else { - return encode(key) + return nil } } diff --git a/storage/resume_uploader_v2.go b/storage/resume_uploader_v2.go index 794578b7..edd5d324 100644 --- a/storage/resume_uploader_v2.go +++ b/storage/resume_uploader_v2.go @@ -6,19 +6,24 @@ import ( "crypto/md5" "encoding/hex" "encoding/json" - "github.com/qiniu/go-sdk/v7/client" - "github.com/qiniu/go-sdk/v7/internal/hostprovider" "io" "os" "path/filepath" "sort" "sync" + + "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/region" ) // ResumeUploaderV2 表示一个分片上传 v2 的对象 type ResumeUploaderV2 struct { - Client *client.Client - Cfg *Config + Client *client.Client + Cfg *Config + storage *apis.Storage } // 
NewResumeUploaderV2 表示构建一个新的分片上传的对象 @@ -36,9 +41,18 @@ func NewResumeUploaderV2Ex(cfg *Config, clt *client.Client) *ResumeUploaderV2 { clt = &client.DefaultClient } + opts := http_client.Options{ + BasicHTTPClient: clt.Client, + UseInsecureProtocol: !cfg.UseHTTPS, + } + if region := cfg.GetRegion(); region != nil { + opts.Regions = region + } + return &ResumeUploaderV2{ - Client: clt, - Cfg: cfg, + Client: clt, + Cfg: cfg, + storage: apis.NewStorage(&opts), } } @@ -105,31 +119,22 @@ func (p *ResumeUploaderV2) rput(ctx context.Context, ret interface{}, upToken st extra.init() var ( - accessKey, bucket, recorderKey string - fileInfo os.FileInfo = nil - hostProvider hostprovider.HostProvider = nil + bucket, recorderKey string + fileInfo os.FileInfo = nil ) if fileDetails != nil { fileInfo = fileDetails.fileInfo } - if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { + if _, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { return } - if extra.UpHost != "" { - hostProvider = hostprovider.NewWithHosts([]string{extra.getUpHost(p.Cfg.UseHTTPS)}) - } else { - hostProvider, err = p.resumeUploaderAPIs().upHostProvider(accessKey, bucket, extra.TryTimes, extra.HostFreezeDuration) - if err != nil { - return - } - } recorderKey = getRecorderKey(extra.Recorder, upToken, key, "v2", extra.PartSize, fileDetails) return uploadByWorkers( - newResumeUploaderV2Impl(p, bucket, key, hasKey, upToken, hostProvider, fileInfo, extra, ret, recorderKey), + newResumeUploaderV2Impl(p, bucket, key, hasKey, upToken, makeEndpointsFromUpHost(extra.UpHost), fileInfo, extra, ret, recorderKey), ctx, newSizedChunkReader(f, fsize, extra.PartSize)) } @@ -139,25 +144,14 @@ func (p *ResumeUploaderV2) rputWithoutSize(ctx context.Context, ret interface{}, } extra.init() - var ( - accessKey, bucket string - hostProvider hostprovider.HostProvider = nil - ) + var bucket string - if accessKey, bucket, err = getAkBucketFromUploadToken(upToken); err != nil { + if _, bucket, 
err = getAkBucketFromUploadToken(upToken); err != nil { return } - if extra.UpHost != "" { - hostProvider = hostprovider.NewWithHosts([]string{extra.getUpHost(p.Cfg.UseHTTPS)}) - } else { - hostProvider, err = p.resumeUploaderAPIs().upHostProvider(accessKey, bucket, extra.TryTimes, extra.HostFreezeDuration) - if err != nil { - return - } - } return uploadByWorkers( - newResumeUploaderV2Impl(p, bucket, key, hasKey, upToken, hostProvider, nil, extra, ret, ""), + newResumeUploaderV2Impl(p, bucket, key, hasKey, upToken, makeEndpointsFromUpHost(extra.UpHost), nil, extra, ret, ""), ctx, newUnsizedChunkReader(r, extra.PartSize)) } @@ -186,17 +180,17 @@ func (p *ResumeUploaderV2) rputFile(ctx context.Context, ret interface{}, upToke // 初始化块请求 func (p *ResumeUploaderV2) InitParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, ret *InitPartsRet) error { - return p.resumeUploaderAPIs().initParts(ctx, upToken, upHost, bucket, key, hasKey, ret) + return p.resumeUploaderAPIs().initParts(ctx, upToken, makeEndpointsFromUpHost(upHost), bucket, key, hasKey, ret) } // 发送块请求 func (p *ResumeUploaderV2) UploadParts(ctx context.Context, upToken, upHost, bucket, key string, hasKey bool, uploadId string, partNumber int64, partMD5 string, ret *UploadPartsRet, body io.Reader, size int) error { - return p.resumeUploaderAPIs().uploadParts(ctx, upToken, upHost, bucket, key, hasKey, uploadId, partNumber, partMD5, ret, body, int64(size)) + return p.resumeUploaderAPIs().uploadParts(ctx, upToken, makeEndpointsFromUpHost(upHost), bucket, key, hasKey, uploadId, partNumber, partMD5, ret, body, int64(size)) } // 完成块请求 func (p *ResumeUploaderV2) CompleteParts(ctx context.Context, upToken, upHost string, ret interface{}, bucket, key string, hasKey bool, uploadId string, extra *RputV2Extra) (err error) { - return p.resumeUploaderAPIs().completeParts(ctx, upToken, upHost, ret, bucket, key, hasKey, uploadId, extra) + return p.resumeUploaderAPIs().completeParts(ctx, upToken, 
makeEndpointsFromUpHost(upHost), ret, bucket, key, hasKey, uploadId, extra) } func (p *ResumeUploaderV2) UpHost(ak, bucket string) (upHost string, err error) { @@ -204,27 +198,27 @@ func (p *ResumeUploaderV2) UpHost(ak, bucket string) (upHost string, err error) } func (p *ResumeUploaderV2) resumeUploaderAPIs() *resumeUploaderAPIs { - return &resumeUploaderAPIs{Client: p.Client, Cfg: p.Cfg} + return &resumeUploaderAPIs{cfg: p.Cfg, storage: p.storage} } type ( // 用于实现 resumeUploaderBase 的 V2 分片接口 resumeUploaderV2Impl struct { - client *client.Client - cfg *Config - bucket string - key string - hasKey bool - uploadId string - expiredAt int64 - upToken string - upHostProvider hostprovider.HostProvider - extra *RputV2Extra - fileInfo os.FileInfo - recorderKey string - ret interface{} - lock sync.Mutex - bufPool *sync.Pool + cfg *Config + storage *apis.Storage + bucket string + key string + hasKey bool + uploadId string + expiredAt int64 + upToken string + upEndpoints region.EndpointsProvider + extra *RputV2Extra + fileInfo os.FileInfo + recorderKey string + ret interface{} + lock sync.Mutex + bufPool *sync.Pool } resumeUploaderV2RecoveryInfoContext struct { @@ -245,19 +239,36 @@ type ( } ) -func newResumeUploaderV2Impl(resumeUploader *ResumeUploaderV2, bucket, key string, hasKey bool, upToken string, upHostProvider hostprovider.HostProvider, fileInfo os.FileInfo, extra *RputV2Extra, ret interface{}, recorderKey string) *resumeUploaderV2Impl { +func newResumeUploaderV2Impl(resumeUploader *ResumeUploaderV2, bucket, key string, hasKey bool, upToken string, upEndpoints region.EndpointsProvider, fileInfo os.FileInfo, extra *RputV2Extra, ret interface{}, recorderKey string) *resumeUploaderV2Impl { + opts := http_client.Options{ + BasicHTTPClient: resumeUploader.Client.Client, + UseInsecureProtocol: !resumeUploader.Cfg.UseHTTPS, + } + if region := resumeUploader.Cfg.GetRegion(); region != nil { + opts.Regions = region + } + if extra != nil { + if extra.TryTimes > 0 { + 
opts.HostRetryConfig = &clientv2.RetryConfig{ + RetryMax: extra.TryTimes, + } + } + if extra.HostFreezeDuration > 0 { + opts.HostFreezeDuration = extra.HostFreezeDuration + } + } return &resumeUploaderV2Impl{ - client: resumeUploader.Client, - cfg: resumeUploader.Cfg, - bucket: bucket, - key: key, - hasKey: hasKey, - upToken: upToken, - upHostProvider: upHostProvider, - fileInfo: fileInfo, - recorderKey: recorderKey, - extra: extra, - ret: ret, + cfg: resumeUploader.Cfg, + bucket: bucket, + key: key, + hasKey: hasKey, + upToken: upToken, + upEndpoints: upEndpoints, + fileInfo: fileInfo, + recorderKey: recorderKey, + extra: extra, + ret: ret, + storage: apis.NewStorage(&opts), bufPool: &sync.Pool{ New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, extra.PartSize)) @@ -283,9 +294,7 @@ func (impl *resumeUploaderV2Impl) initUploader(ctx context.Context) ([]int64, er } } - err := doUploadAction(impl.upHostProvider, impl.extra.TryTimes, impl.extra.HostFreezeDuration, func(host string) error { - return impl.resumeUploaderAPIs().initParts(ctx, impl.upToken, host, impl.bucket, impl.key, impl.hasKey, &ret) - }) + err := impl.resumeUploaderAPIs().initParts(ctx, impl.upToken, impl.upEndpoints, impl.bucket, impl.key, impl.hasKey, &ret) if err == nil { impl.uploadId = ret.UploadID impl.expiredAt = ret.ExpireAt @@ -295,18 +304,16 @@ func (impl *resumeUploaderV2Impl) initUploader(ctx context.Context) ([]int64, er func (impl *resumeUploaderV2Impl) uploadChunk(ctx context.Context, c chunk) error { var ( - apis = impl.resumeUploaderAPIs() - ret UploadPartsRet - chunkSize int64 - buffer = impl.bufPool.Get().(*bytes.Buffer) - err error + apis = impl.resumeUploaderAPIs() + ret UploadPartsRet + buffer = impl.bufPool.Get().(*bytes.Buffer) ) defer impl.bufPool.Put(buffer) partNumber := c.id + 1 hasher := md5.New() buffer.Reset() - chunkSize, err = io.Copy(hasher, io.TeeReader(io.NewSectionReader(c.reader, 0, c.size), buffer)) + chunkSize, err := io.Copy(hasher, 
io.TeeReader(io.NewSectionReader(c.reader, 0, c.size), buffer) if err != nil { impl.extra.NotifyErr(partNumber, err) return err @@ -315,16 +322,8 @@ func (impl *resumeUploaderV2Impl) uploadChunk(ctx context.Context, c chunk) erro } md5Value := hex.EncodeToString(hasher.Sum(nil)) - seekableData := bytes.NewReader(buffer.Bytes()) - err = doUploadAction(impl.upHostProvider, impl.extra.TryTimes, impl.extra.HostFreezeDuration, func(host string) error { - if _, sErr := seekableData.Seek(0, io.SeekStart); sErr != nil { - return sErr - } - - return apis.uploadParts(ctx, impl.upToken, host, impl.bucket, impl.key, impl.hasKey, impl.uploadId, - partNumber, md5Value, &ret, seekableData, chunkSize) - }) + err = apis.uploadParts(ctx, impl.upToken, impl.upEndpoints, impl.bucket, impl.key, impl.hasKey, impl.uploadId, partNumber, md5Value, &ret, bytes.NewReader(buffer.Bytes()), chunkSize) if err != nil { impl.extra.NotifyErr(partNumber, err) impl.deleteUploadRecordIfNeed(err, false) @@ -355,9 +354,7 @@ func (impl *resumeUploaderV2Impl) final(ctx context.Context) error { } sort.Sort(uploadPartInfos(impl.extra.Progresses)) - err := doUploadAction(impl.upHostProvider, impl.extra.TryTimes, impl.extra.HostFreezeDuration, func(host string) error { - return impl.resumeUploaderAPIs().completeParts(ctx, impl.upToken, host, impl.ret, impl.bucket, impl.key, impl.hasKey, impl.uploadId, impl.extra) - }) + err := impl.resumeUploaderAPIs().completeParts(ctx, impl.upToken, impl.upEndpoints, impl.ret, impl.bucket, impl.key, impl.hasKey, impl.uploadId, impl.extra) impl.deleteUploadRecordIfNeed(err, false) return err } @@ -430,5 +427,5 @@ func (impl *resumeUploaderV2Impl) save(ctx context.Context) { } func (impl *resumeUploaderV2Impl) resumeUploaderAPIs() *resumeUploaderAPIs { - return &resumeUploaderAPIs{Client: impl.client, Cfg: impl.cfg} + return &resumeUploaderAPIs{cfg: impl.cfg, storage: impl.storage} } diff --git a/storage/token.go b/storage/token.go index b2caedd2..0afef1b5 100644 --- a/storage/token.go +++ 
b/storage/token.go @@ -1,13 +1,12 @@ package storage import ( - "encoding/base64" + "context" "encoding/json" - "errors" - "strings" "time" "github.com/qiniu/go-sdk/v7/auth" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" ) // PutPolicy 表示文件上传的上传策略,参考 https://developer.qiniu.com/kodo/manual/1206/put-policy @@ -145,29 +144,14 @@ func (p PutPolicy) uploadToken(cred *auth.Credentials) (token string) { } func getAkBucketFromUploadToken(token string) (ak, bucket string, err error) { - items := strings.Split(token, ":") - // KODO-11919 - if len(items) == 5 && items[0] == "" { - items = items[2:] - } else if len(items) != 3 { - err = errors.New("invalid upload token, format error") + parser := uptoken.NewParser(token) + if ak, err = parser.GetAccessKey(context.Background()); err != nil { return } - - ak = items[0] - policyBytes, dErr := base64.URLEncoding.DecodeString(items[2]) - if dErr != nil { - err = errors.New("invalid upload token, invalid put policy") + upPolicy, err := parser.GetPutPolicy(context.Background()) + if err != nil { return } - - putPolicy := PutPolicy{} - uErr := json.Unmarshal(policyBytes, &putPolicy) - if uErr != nil { - err = errors.New("invalid upload token, invalid put policy") - return - } - - bucket = strings.Split(putPolicy.Scope, ":")[0] + bucket, err = upPolicy.GetBucketName() return } diff --git a/storage/uc.go b/storage/uc.go index 1f913dad..d3fa3fa7 100644 --- a/storage/uc.go +++ b/storage/uc.go @@ -10,6 +10,9 @@ import ( "time" "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_cors_rules" + "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_taggings" "github.com/qiniu/go-sdk/v7/auth" ) @@ -226,28 +229,25 @@ func (b *BucketInfo) TokenAntiLeechModeOn() bool { func (m *BucketManager) GetBucketInfo(bucketName string) (bucketInfo BucketInfo, err error) { reqURL := fmt.Sprintf("%s/v2/bucketInfo?bucket=%s", getUcHost(m.Cfg.UseHTTPS), 
bucketName) err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: nil, + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: nil, }, &bucketInfo) return bucketInfo, err } // SetRemark 设置空间备注信息 -func (m *BucketManager) SetRemark(bucketName, remark string) (err error) { - reqURL := fmt.Sprintf("%s/buckets/%s?remark", getUcHost(m.Cfg.UseHTTPS), bucketName) - body := struct { - Remark string `json:"remark"` - }{Remark: remark} - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPut, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorOfJson(body), - }, nil) +func (m *BucketManager) SetRemark(bucketName, remark string) error { + _, err := m.apiClient.SetBucketRemark( + context.Background(), + &apis.SetBucketRemarkRequest{ + Bucket: bucketName, + Remark: remark, + }, + m.makeRequestOptions(), + ) return err } @@ -255,11 +255,11 @@ func (m *BucketManager) SetRemark(bucketName, remark string) (err error) { func (m *BucketManager) BucketInfosInRegion(region RegionID, statistics bool) (bucketInfos []BucketSummary, err error) { reqURL := fmt.Sprintf("%s/v2/bucketInfos?region=%s&fs=%t", getUcHost(m.Cfg.UseHTTPS), string(region), statistics) err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: nil, + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: nil, }, &bucketInfos) return bucketInfos, err } @@ -309,80 +309,80 @@ type BucketLifeCycleRule struct { } // SetBucketLifeCycleRule 设置存储空间内文件的生命周期规则 -func (m *BucketManager) AddBucketLifeCycleRule(bucketName string, lifeCycleRule *BucketLifeCycleRule) (err error) { - params := make(map[string][]string) - - 
// 没有检查参数的合法性,交给服务端检查 - params["bucket"] = []string{bucketName} - params["name"] = []string{lifeCycleRule.Name} - params["prefix"] = []string{lifeCycleRule.Prefix} - params["delete_after_days"] = []string{strconv.Itoa(lifeCycleRule.DeleteAfterDays)} - params["to_ia_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToLineAfterDays)} - params["to_archive_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToArchiveAfterDays)} - params["to_archive_ir_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToArchiveIRAfterDays)} - params["to_deep_archive_after_days"] = []string{strconv.Itoa(lifeCycleRule.ToDeepArchiveAfterDays)} - - reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/add" - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorForm(params), - }, nil) +func (m *BucketManager) AddBucketLifeCycleRule(bucketName string, lifeCycleRule *BucketLifeCycleRule) error { + _, err := m.apiClient.AddBucketRules( + context.Background(), + &apis.AddBucketRulesRequest{ + Bucket: bucketName, + Name: lifeCycleRule.Name, + Prefix: lifeCycleRule.Prefix, + DeleteAfterDays: int64(lifeCycleRule.DeleteAfterDays), + ToIaAfterDays: int64(lifeCycleRule.ToLineAfterDays), + ToArchiveAfterDays: int64(lifeCycleRule.ToArchiveAfterDays), + ToArchiveIrAfterDays: int64(lifeCycleRule.ToArchiveIRAfterDays), + ToDeepArchiveAfterDays: int64(lifeCycleRule.ToDeepArchiveAfterDays), + }, + m.makeRequestOptions(), + ) + return err } // DelBucketLifeCycleRule 删除特定存储空间上设定的规则 -func (m *BucketManager) DelBucketLifeCycleRule(bucketName, ruleName string) (err error) { - params := make(map[string][]string) - - params["bucket"] = []string{bucketName} - params["name"] = []string{ruleName} - - reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/delete" - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: 
clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorForm(params), - }, nil) +func (m *BucketManager) DelBucketLifeCycleRule(bucketName, ruleName string) error { + _, err := m.apiClient.DeleteBucketRules( + context.Background(), + &apis.DeleteBucketRulesRequest{ + Bucket: bucketName, + Name: ruleName, + }, + m.makeRequestOptions(), + ) + return err } // UpdateBucketLifeCycleRule 更新特定存储空间上的生命周期规则 -func (m *BucketManager) UpdateBucketLifeCycleRule(bucketName string, rule *BucketLifeCycleRule) (err error) { - params := make(map[string][]string) - - params["bucket"] = []string{bucketName} - params["name"] = []string{rule.Name} - params["prefix"] = []string{rule.Prefix} - params["delete_after_days"] = []string{strconv.Itoa(rule.DeleteAfterDays)} - params["to_line_after_days"] = []string{strconv.Itoa(rule.ToLineAfterDays)} - params["to_archive_after_days"] = []string{strconv.Itoa(rule.ToArchiveAfterDays)} - params["to_archive_ir_after_days"] = []string{strconv.Itoa(rule.ToArchiveIRAfterDays)} - params["to_deep_archive_after_days"] = []string{strconv.Itoa(rule.ToDeepArchiveAfterDays)} - - reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/update" - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorForm(params), - }, nil) +func (m *BucketManager) UpdateBucketLifeCycleRule(bucketName string, rule *BucketLifeCycleRule) error { + _, err := m.apiClient.UpdateBucketRules( + context.Background(), + &apis.UpdateBucketRulesRequest{ + Bucket: bucketName, + Name: rule.Name, + Prefix: rule.Prefix, + DeleteAfterDays: int64(rule.DeleteAfterDays), + ToIaAfterDays: int64(rule.ToLineAfterDays), + ToArchiveAfterDays: int64(rule.ToArchiveAfterDays), + ToArchiveIrAfterDays: int64(rule.ToArchiveIRAfterDays), + ToDeepArchiveAfterDays: int64(rule.ToDeepArchiveAfterDays), + }, + 
m.makeRequestOptions(), + ) + return err } // GetBucketLifeCycleRule 获取指定空间上设置的生命周期规则 -func (m *BucketManager) GetBucketLifeCycleRule(bucketName string) (rules []BucketLifeCycleRule, err error) { - reqURL := getUcHost(m.Cfg.UseHTTPS) + "/rules/get?bucket=" + bucketName - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, &rules) - return rules, err +func (m *BucketManager) GetBucketLifeCycleRule(bucketName string) ([]BucketLifeCycleRule, error) { + response, err := m.apiClient.GetBucketRules( + context.Background(), + &apis.GetBucketRulesRequest{ + Bucket: bucketName, + }, + m.makeRequestOptions(), + ) + if err != nil { + return nil, err + } + rules := make([]BucketLifeCycleRule, 0, len(response.BucketRules)) + for _, rule := range response.BucketRules { + rules = append(rules, BucketLifeCycleRule{ + Name: rule.Name, + Prefix: rule.Prefix, + DeleteAfterDays: int(rule.DeleteAfterDays), + ToLineAfterDays: int(rule.ToIaAfterDays), + ToArchiveAfterDays: int(rule.ToArchiveAfterDays), + ToDeepArchiveAfterDays: int(rule.ToDeepArchiveAfterDays), + }) + } + return rules, nil } // BucketEnvent 定义了存储空间发生事件时候的通知规则 @@ -441,11 +441,11 @@ func (m *BucketManager) AddBucketEvent(bucket string, rule *BucketEventRule) (er params := rule.Params(bucket) reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/add" return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorForm(params), + Context: nil, + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: clientv2.GetFormRequestBody(params), }, nil) } @@ -457,11 +457,11 @@ func (m *BucketManager) DelBucketEvent(bucket, ruleName string) (err error) { reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/delete" return 
clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorForm(params), + Context: nil, + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: clientv2.GetFormRequestBody(params), }, nil) } @@ -470,11 +470,11 @@ func (m *BucketManager) UpdateBucketEnvent(bucket string, rule *BucketEventRule) params := rule.Params(bucket) reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/update" return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorForm(params), + Context: nil, + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: clientv2.GetFormRequestBody(params), }, nil) } @@ -482,11 +482,11 @@ func (m *BucketManager) UpdateBucketEnvent(bucket string, rule *BucketEventRule) func (m *BucketManager) GetBucketEvent(bucket string) (rule []BucketEventRule, err error) { reqURL := getUcHost(m.Cfg.UseHTTPS) + "/events/get?bucket=" + bucket err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: nil, + Method: clientv2.RequestMethodGet, + Url: reqURL, + Header: nil, + GetBody: nil, }, &rule) return rule, err } @@ -521,28 +521,51 @@ type CorsRule struct { } // AddCorsRules 设置指定存储空间的跨域规则 -func (m *BucketManager) AddCorsRules(bucket string, corsRules []CorsRule) (err error) { - reqURL := getUcHost(m.Cfg.UseHTTPS) + "/corsRules/set/" + bucket - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorOfJson(corsRules), - }, nil) +func (m *BucketManager) AddCorsRules(bucket 
string, corsRules []CorsRule) error { + rules := make(set_bucket_cors_rules.CORSRules, 0, len(corsRules)) + for _, rule := range corsRules { + rules = append(rules, set_bucket_cors_rules.CORSRule{ + AllowedOrigin: rule.AllowedOrigin, + AllowedMethod: rule.AllowedMethod, + AllowedHeader: rule.AllowedHeader, + ExposedHeader: rule.ExposedHeader, + MaxAge: rule.MaxAge, + }) + } + _, err := m.apiClient.SetBucketCORSRules( + context.Background(), + &apis.SetBucketCORSRulesRequest{ + Bucket: bucket, + CORSRules: rules, + }, + m.makeRequestOptions(), + ) + return err } // GetCorsRules 获取指定存储空间的跨域规则 -func (m *BucketManager) GetCorsRules(bucket string) (corsRules []CorsRule, err error) { - reqURL := getUcHost(m.Cfg.UseHTTPS) + "/corsRules/get/" + bucket - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, &corsRules) - return corsRules, err +func (m *BucketManager) GetCorsRules(bucket string) ([]CorsRule, error) { + response, err := m.apiClient.GetBucketCORSRules( + context.Background(), + &apis.GetBucketCORSRulesRequest{ + Bucket: bucket, + }, + m.makeRequestOptions(), + ) + if err != nil { + return nil, err + } + rules := make([]CorsRule, 0, len(response.CORSRules)) + for _, rule := range response.CORSRules { + rules = append(rules, CorsRule{ + AllowedOrigin: rule.AllowedOrigin, + AllowedMethod: rule.AllowedMethod, + AllowedHeader: rule.AllowedHeader, + ExposedHeader: rule.ExposedHeader, + MaxAge: rule.MaxAge, + }) + } + return rules, nil } // BucketQuota 七牛存储空间的配额信息 @@ -560,42 +583,50 @@ type BucketQuota struct { } // SetBucketQuota 设置存储空间的配额限制 // 配额限制主要是两块, 空间存储量的限制和空间文件数限制 -func (m *BucketManager) SetBucketQuota(bucket string, size, count int64) (err error) { - reqURL := fmt.Sprintf("%s/setbucketquota/%s/size/%d/count/%d", getUcHost(m.Cfg.UseHTTPS), bucket, size, count) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), 
clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) +func (m *BucketManager) SetBucketQuota(bucket string, size, count int64) error { + _, err := m.apiClient.SetBucketQuota( + context.Background(), + &apis.SetBucketQuotaRequest{ + Bucket: bucket, + Size: size, + Count: count, + }, + m.makeRequestOptions(), + ) + return err } // GetBucketQuota 获取存储空间的配额信息 func (m *BucketManager) GetBucketQuota(bucket string) (quota BucketQuota, err error) { - reqURL := fmt.Sprintf("%s/getbucketquota/%s", getUcHost(m.Cfg.UseHTTPS), bucket) - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, &quota) - return quota, err + response, err := m.apiClient.GetBucketQuota( + context.Background(), + &apis.GetBucketQuotaRequest{ + Bucket: bucket, + }, + m.makeRequestOptions(), + ) + if err != nil { + return BucketQuota{}, err + } + return BucketQuota{ + Size: response.Size, + Count: response.Count, + }, nil } // SetBucketAccessStyle 可以用来开启或关闭制定存储空间的原图保护 // mode - 1 ==> 开启原图保护 // mode - 0 ==> 关闭原图保护 func (m *BucketManager) SetBucketAccessStyle(bucket string, mode int) error { - reqURL := fmt.Sprintf("%s/accessMode/%s/mode/%d", getUcHost(m.Cfg.UseHTTPS), bucket, mode) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) + _, err := m.apiClient.SetBucketAccessMode( + context.Background(), + &apis.SetBucketAccessModeRequest{ + Bucket: bucket, + Mode: int64(mode), + }, + m.makeRequestOptions(), + ) + return err } // TurnOffBucketProtected 开启指定存储空间的原图保护 @@ -611,14 +642,15 @@ func (m *BucketManager) TurnOffBucketProtected(bucket string) error { // SetBucketMaxAge 设置指定存储空间的MaxAge响应头 // maxAge <= 0时,表示使用默认值31536000 func (m *BucketManager) 
SetBucketMaxAge(bucket string, maxAge int64) error { - reqURL := fmt.Sprintf("%s/maxAge?bucket=%s&maxAge=%d", getUcHost(m.Cfg.UseHTTPS), bucket, maxAge) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) + _, err := m.apiClient.SetBucketMaxAge( + context.Background(), + &apis.SetBucketMaxAgeRequest{ + Bucket: bucket, + MaxAge: maxAge, + }, + m.makeRequestOptions(), + ) + return err } // SetBucketAccessMode 设置指定空间的私有属性 @@ -626,14 +658,15 @@ func (m *BucketManager) SetBucketMaxAge(bucket string, maxAge int64) error { // mode - 1 表示设置空间为私有空间, 私有空间访问需要鉴权 // mode - 0 表示设置空间为公开空间 func (m *BucketManager) SetBucketAccessMode(bucket string, mode int) error { - reqURL := fmt.Sprintf("%s/private?bucket=%s&private=%d", getUcHost(m.Cfg.UseHTTPS), bucket, mode) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) + _, err := m.apiClient.SetBucketPrivate( + context.Background(), + &apis.SetBucketPrivateRequest{ + Bucket: bucket, + IsPrivate: int64(mode), + }, + m.makeRequestOptions(), + ) + return err } // MakeBucketPublic 设置空间为公有空间 @@ -659,11 +692,11 @@ func (m *BucketManager) TurnOffIndexPage(bucket string) error { func (m *BucketManager) setIndexPage(bucket string, noIndexPage int) error { reqURL := fmt.Sprintf("%s/noIndexPage?bucket=%s&noIndexPage=%d", getUcHost(m.Cfg.UseHTTPS), bucket, noIndexPage) return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: nil, - Method: clientv2.RequestMethodPost, - Url: reqURL, - Header: nil, - BodyCreator: nil, + Context: nil, + Method: clientv2.RequestMethodPost, + Url: reqURL, + Header: nil, + GetBody: nil, }, nil) } @@ -682,52 +715,50 @@ type BucketTag struct { // SetTagging 该方法为覆盖所有 Bucket 上之前设置的标签,标签 Key 最大 64 
字节,Value 最大 128 字节,均不能为空,且区分大小写 // Key 不能以 kodo 为前缀,Key 和 Value 的字符只能为:字母,数字,空格,+,-,=,.,_,:,/,@,不能支持中文 func (m *BucketManager) SetTagging(bucket string, tags map[string]string) error { - tagging := BucketTagging{Tags: make([]BucketTag, 0, len(tags))} - for key, value := range tags { - tagging.Tags = append(tagging.Tags, BucketTag{Key: key, Value: value}) + tagPairs := make(set_bucket_taggings.Tags, 0, len(tags)) + for k, v := range tags { + tagPairs = append(tagPairs, set_bucket_taggings.TagInfo{Key: k, Value: v}) } - - reqURL := fmt.Sprintf("%s/bucketTagging?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucket) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodPut, - Url: reqURL, - Header: nil, - BodyCreator: clientv2.RequestBodyCreatorOfJson(tagging), - }, nil) + _, err := m.apiClient.SetBucketTaggings( + context.Background(), + &apis.SetBucketTaggingsRequest{ + Bucket: bucket, + Tags: tagPairs, + }, + m.makeRequestOptions(), + ) + return err } // ClearTagging 清空 Bucket 标签 func (m *BucketManager) ClearTagging(bucket string) error { - reqURL := fmt.Sprintf("%s/bucketTagging?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucket) - return clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: clientv2.RequestMethodDelete, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, nil) + _, err := m.apiClient.DeleteBucketTaggings( + context.Background(), + &apis.DeleteBucketTaggingsRequest{ + BucketName: bucket, + }, + m.makeRequestOptions(), + ) + return err } // GetTagging 获取 Bucket 标签 -func (m *BucketManager) GetTagging(bucket string) (tags map[string]string, err error) { - var tagging BucketTagging - reqURL := fmt.Sprintf("%s/bucketTagging?bucket=%s", getUcHost(m.Cfg.UseHTTPS), bucket) - err = clientv2.DoAndDecodeJsonResponse(m.getUCClient(), clientv2.RequestParams{ - Context: context.Background(), - Method: 
clientv2.RequestMethodGet, - Url: reqURL, - Header: nil, - BodyCreator: nil, - }, &tagging) +func (m *BucketManager) GetTagging(bucket string) (map[string]string, error) { + response, err := m.apiClient.GetBucketTaggings( + context.Background(), + &apis.GetBucketTaggingsRequest{ + BucketName: bucket, + }, + m.makeRequestOptions(), + ) if err != nil { - return + return nil, err } - tags = make(map[string]string, len(tagging.Tags)) - for _, tag := range tagging.Tags { + tags := make(map[string]string, len(response.Tags)) + for _, tag := range response.Tags { tags[tag.Key] = tag.Value } - return + return tags, nil } func (m *BucketManager) getUCClient() clientv2.Client { diff --git a/storage/upload_manager_test.go b/storage/upload_manager_test.go index 64a7eea3..7eb6e4b0 100644 --- a/storage/upload_manager_test.go +++ b/storage/upload_manager_test.go @@ -6,11 +6,12 @@ package storage import ( "context" "fmt" - clientV1 "github.com/qiniu/go-sdk/v7/client" "io" "io/ioutil" "os" "testing" + + clientV1 "github.com/qiniu/go-sdk/v7/client" ) func getUploadManager() *UploadManager { diff --git a/storage/upload_progress.go b/storage/upload_progress.go deleted file mode 100644 index d627b0e7..00000000 --- a/storage/upload_progress.go +++ /dev/null @@ -1,26 +0,0 @@ -package storage - -type uploadProgress struct { - lastUploadedBytes int64 - progress func(totalBytes, uploadedBytes int64) -} - -func newUploadProgress(progressHandler func(totalBytes, uploadedBytes int64)) *uploadProgress { - return &uploadProgress{ - lastUploadedBytes: 0, - progress: progressHandler, - } -} - -func (p *uploadProgress) onProgress(totalBytes, uploadedBytes int64) { - if p.progress == nil { - return - } - - if p.lastUploadedBytes >= uploadedBytes { - // 过滤重新上传的场景 - return - } - p.lastUploadedBytes = uploadedBytes - p.progress(totalBytes, uploadedBytes) -} diff --git a/storagev2/apis/add_bucket_rules/api.go b/storagev2/apis/add_bucket_rules/api.go new file mode 100644 index 00000000..46070fe0 --- 
/dev/null +++ b/storagev2/apis/add_bucket_rules/api.go @@ -0,0 +1,22 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 增加空间规则 +package add_bucket_rules + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Bucket string // 空间名称 + Name string // 规则名称空间内唯一,长度小于50,不能为空,只能为字母、数字、下划线 + Prefix string // 匹配的对象名称前缀,同一个空间内前缀不能重复 + DeleteAfterDays int64 // 指定上传文件多少天后删除,指定为 0 表示不删除,大于 0 表示多少天后删除 + ToIaAfterDays int64 // 指定文件上传多少天后转低频存储。指定为 0 表示不转低频存储 + ToArchiveAfterDays int64 // 指定文件上传多少天后转归档存储。指定为 0 表示不转归档存储 + ToDeepArchiveAfterDays int64 // 指定文件上传多少天后转深度归档存储。指定为 0 表示不转深度归档存储 + ToArchiveIrAfterDays int64 // 指定文件上传多少天后转归档直读存储。指定为 0 表示不转归档直读存储 +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/api_add_bucket_rules.go b/storagev2/apis/api_add_bucket_rules.go new file mode 100644 index 00000000..c30eb78a --- /dev/null +++ b/storagev2/apis/api_add_bucket_rules.go @@ -0,0 +1,116 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + addbucketrules "github.com/qiniu/go-sdk/v7/storagev2/apis/add_bucket_rules" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerAddBucketRulesRequest addbucketrules.Request + +func (form *innerAddBucketRulesRequest) getBucketName(ctx context.Context) (string, error) { + return form.Bucket, nil +} +func (form *innerAddBucketRulesRequest) build() (url.Values, error) { + formValues := make(url.Values) + if form.Bucket != "" { + formValues.Set("bucket", form.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if form.Name != "" { + formValues.Set("name", form.Name) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Name"} + } + formValues.Set("prefix", form.Prefix) + formValues.Set("delete_after_days", strconv.FormatInt(form.DeleteAfterDays, 10)) + formValues.Set("to_line_after_days", strconv.FormatInt(form.ToIaAfterDays, 10)) + formValues.Set("to_archive_after_days", strconv.FormatInt(form.ToArchiveAfterDays, 10)) + formValues.Set("to_deep_archive_after_days", strconv.FormatInt(form.ToDeepArchiveAfterDays, 10)) + formValues.Set("to_archive_ir_after_days", strconv.FormatInt(form.ToArchiveIrAfterDays, 10)) + return formValues, nil +} +func (request *innerAddBucketRulesRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type AddBucketRulesRequest = addbucketrules.Request +type AddBucketRulesResponse = addbucketrules.Response + +// 增加空间规则 +func (storage *Storage) AddBucketRules(ctx context.Context, request *AddBucketRulesRequest, options *Options) 
(*AddBucketRulesResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerAddBucketRulesRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "rules", "add") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := innerRequest.build() + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + 
return nil, err + } + return &AddBucketRulesResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_async_fetch_object.go b/storagev2/apis/api_async_fetch_object.go new file mode 100644 index 00000000..d9cdf8ae --- /dev/null +++ b/storagev2/apis/api_async_fetch_object.go @@ -0,0 +1,109 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/json" + auth "github.com/qiniu/go-sdk/v7/auth" + asyncfetchobject "github.com/qiniu/go-sdk/v7/storagev2/apis/async_fetch_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerAsyncFetchObjectRequest asyncfetchobject.Request + +func (j *innerAsyncFetchObjectRequest) getBucketName(ctx context.Context) (string, error) { + return j.Bucket, nil +} +func (j *innerAsyncFetchObjectRequest) MarshalJSON() ([]byte, error) { + return json.Marshal((*asyncfetchobject.Request)(j)) +} +func (j *innerAsyncFetchObjectRequest) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, (*asyncfetchobject.Request)(j)) +} +func (request *innerAsyncFetchObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type AsyncFetchObjectRequest = asyncfetchobject.Request +type AsyncFetchObjectResponse = asyncfetchobject.Response + +// 从指定 URL 抓取资源,并将该资源存储到指定空间中。每次只抓取一个文件,抓取时可以指定保存空间名和最终资源名 +func (storage *Storage) AsyncFetchObject(ctx context.Context, request *AsyncFetchObjectRequest, options *Options) (*AsyncFetchObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerAsyncFetchObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceApi} + if 
innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "sisyphus", "fetch") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := httpclient.GetJsonRequestBody(&innerRequest) + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, RequestBody: body} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err 
:= credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody AsyncFetchObjectResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_batch_ops.go b/storagev2/apis/api_batch_ops.go new file mode 100644 index 00000000..3170031a --- /dev/null +++ b/storagev2/apis/api_batch_ops.go @@ -0,0 +1,106 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + batchops "github.com/qiniu/go-sdk/v7/storagev2/apis/batch_ops" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerBatchOpsRequest batchops.Request + +func (form *innerBatchOpsRequest) build() (url.Values, error) { + formValues := make(url.Values) + if len(form.Operations) > 0 { + for _, value := range form.Operations { + formValues.Add("op", value) + } + } else { + return nil, errors.MissingRequiredFieldError{Name: "Operations"} + } + return formValues, nil +} +func (request *innerBatchOpsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type BatchOpsRequest = batchops.Request +type BatchOpsResponse = batchops.Response + +// 批量操作意指在单一请求中执行多次(最大限制1000次) 查询元信息、修改元信息、移动、复制、删除、修改状态、修改存储类型、修改生命周期和解冻操作,极大提高对象管理效率。其中,解冻操作仅针对归档存储文件有效 +func (storage *Storage) BatchOps(ctx context.Context, request *BatchOpsRequest, options *Options) (*BatchOpsResponse, error) { + if options 
== nil { + options = &Options{} + } + innerRequest := (*innerBatchOpsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "batch") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := innerRequest.build() + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true, RequestBody: httpclient.GetFormRequestBody(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider 
!= nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody BatchOpsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_copy_object.go b/storagev2/apis/api_copy_object.go new file mode 100644 index 00000000..158c9a62 --- /dev/null +++ b/storagev2/apis/api_copy_object.go @@ -0,0 +1,122 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + copyobject "github.com/qiniu/go-sdk/v7/storagev2/apis/copy_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerCopyObjectRequest copyobject.Request + +func (pp *innerCopyObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.SrcEntry, ":", 2)[0], nil +} +func (path *innerCopyObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.SrcEntry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.SrcEntry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "SrcEntry"} + } + if path.DestEntry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.DestEntry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "DestEntry"} + } + if path.IsForce { + allSegments = append(allSegments, "force", strconv.FormatBool(path.IsForce)) + } + return allSegments, nil +} +func (request *innerCopyObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if 
request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type CopyObjectRequest = copyobject.Request +type CopyObjectResponse = copyobject.Response + +// 将源空间的指定对象复制到目标空间 +func (storage *Storage) CopyObject(ctx context.Context, request *CopyObjectRequest, options *Options) (*CopyObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerCopyObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "copy") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + 
queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &CopyObjectResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_create_bucket.go b/storagev2/apis/api_create_bucket.go new file mode 100644 index 00000000..c22ffda9 --- /dev/null +++ b/storagev2/apis/api_create_bucket.go @@ -0,0 +1,99 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + createbucket "github.com/qiniu/go-sdk/v7/storagev2/apis/create_bucket" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerCreateBucketRequest createbucket.Request + +func (path *innerCreateBucketRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if path.Region != "" { + allSegments = append(allSegments, "region", path.Region) + } + return allSegments, nil +} +func (request *innerCreateBucketRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type CreateBucketRequest = createbucket.Request +type CreateBucketResponse = createbucket.Response + +// 创建一个新的存储空间 +func (storage *Storage) CreateBucket(ctx context.Context, request *CreateBucketRequest, options *Options) (*CreateBucketResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerCreateBucketRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "mkbucketv3") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &CreateBucketResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_delete_bucket.go b/storagev2/apis/api_delete_bucket.go new file mode 100644 index 00000000..42a317ea --- /dev/null +++ b/storagev2/apis/api_delete_bucket.go @@ -0,0 +1,104 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + deletebucket "github.com/qiniu/go-sdk/v7/storagev2/apis/delete_bucket" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerDeleteBucketRequest deletebucket.Request + +func (pp *innerDeleteBucketRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerDeleteBucketRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allSegments, nil +} +func (request *innerDeleteBucketRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type DeleteBucketRequest = deletebucket.Request +type DeleteBucketResponse = deletebucket.Response + +// 删除指定的存储空间 +func (storage *Storage) DeleteBucket(ctx context.Context, request *DeleteBucketRequest, options *Options) (*DeleteBucketResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerDeleteBucketRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "drop") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &DeleteBucketResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_delete_bucket_rules.go b/storagev2/apis/api_delete_bucket_rules.go new file mode 100644 index 00000000..38a87bcb --- /dev/null +++ b/storagev2/apis/api_delete_bucket_rules.go @@ -0,0 +1,109 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + deletebucketrules "github.com/qiniu/go-sdk/v7/storagev2/apis/delete_bucket_rules" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerDeleteBucketRulesRequest deletebucketrules.Request + +func (form *innerDeleteBucketRulesRequest) getBucketName(ctx context.Context) (string, error) { + return form.Bucket, nil +} +func (form *innerDeleteBucketRulesRequest) build() (url.Values, error) { + formValues := make(url.Values) + if form.Bucket != "" { + formValues.Set("bucket", form.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if form.Name != "" { + formValues.Set("name", form.Name) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Name"} + } + return formValues, nil +} +func (request *innerDeleteBucketRulesRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type DeleteBucketRulesRequest = deletebucketrules.Request +type DeleteBucketRulesResponse = deletebucketrules.Response + +// 删除空间规则 +func (storage *Storage) DeleteBucketRules(ctx context.Context, request *DeleteBucketRulesRequest, options *Options) (*DeleteBucketRulesResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerDeleteBucketRulesRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "rules", "delete") + path := "/" + 
strings.Join(pathSegments, "/") + var rawQuery string + body, err := innerRequest.build() + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &DeleteBucketRulesResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_delete_bucket_taggings.go b/storagev2/apis/api_delete_bucket_taggings.go new file mode 100644 index 00000000..5ef33f3b --- /dev/null +++ b/storagev2/apis/api_delete_bucket_taggings.go @@ -0,0 +1,105 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + deletebuckettaggings "github.com/qiniu/go-sdk/v7/storagev2/apis/delete_bucket_taggings" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerDeleteBucketTaggingsRequest deletebuckettaggings.Request + +func (query *innerDeleteBucketTaggingsRequest) getBucketName(ctx context.Context) (string, error) { + return query.BucketName, nil +} +func (query *innerDeleteBucketTaggingsRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.BucketName != "" { + allQuery.Set("bucket", query.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + return allQuery, nil +} +func (request *innerDeleteBucketTaggingsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type DeleteBucketTaggingsRequest = deletebuckettaggings.Request +type DeleteBucketTaggingsResponse = deletebuckettaggings.Response + +// 一键删除指定存储空间的所有标签 +func (storage *Storage) DeleteBucketTaggings(ctx context.Context, request *DeleteBucketTaggingsRequest, options *Options) (*DeleteBucketTaggingsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerDeleteBucketTaggingsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "bucketTagging") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := 
innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "DELETE", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &DeleteBucketTaggingsResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_delete_object.go b/storagev2/apis/api_delete_object.go new file mode 100644 index 00000000..34befa9a --- /dev/null +++ b/storagev2/apis/api_delete_object.go @@ -0,0 +1,113 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + deleteobject "github.com/qiniu/go-sdk/v7/storagev2/apis/delete_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerDeleteObjectRequest deleteobject.Request + +func (pp *innerDeleteObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerDeleteObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + return allSegments, nil +} +func (request *innerDeleteObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type DeleteObjectRequest = deleteobject.Request +type DeleteObjectResponse = deleteobject.Response + +// 删除指定对象 +func (storage *Storage) DeleteObject(ctx context.Context, request *DeleteObjectRequest, options *Options) (*DeleteObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerDeleteObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "delete") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &DeleteObjectResponse{}, 
resp.Body.Close() +} diff --git a/storagev2/apis/api_delete_object_after_days.go b/storagev2/apis/api_delete_object_after_days.go new file mode 100644 index 00000000..2d5135ff --- /dev/null +++ b/storagev2/apis/api_delete_object_after_days.go @@ -0,0 +1,115 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + deleteobjectafterdays "github.com/qiniu/go-sdk/v7/storagev2/apis/delete_object_after_days" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerDeleteObjectAfterDaysRequest deleteobjectafterdays.Request + +func (pp *innerDeleteObjectAfterDaysRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerDeleteObjectAfterDaysRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + allSegments = append(allSegments, strconv.FormatInt(path.DeleteAfterDays, 10)) + return allSegments, nil +} +func (request *innerDeleteObjectAfterDaysRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type DeleteObjectAfterDaysRequest = deleteobjectafterdays.Request +type DeleteObjectAfterDaysResponse = deleteobjectafterdays.Response + +// 更新文件生命周期 +func (storage *Storage) DeleteObjectAfterDays(ctx context.Context, request *DeleteObjectAfterDaysRequest, options *Options) (*DeleteObjectAfterDaysResponse, error) { + if options == nil { + 
options = &Options{} + } + innerRequest := (*innerDeleteObjectAfterDaysRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "deleteAfterDays") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil 
{ + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &DeleteObjectAfterDaysResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_fetch_object.go b/storagev2/apis/api_fetch_object.go new file mode 100644 index 00000000..bd0d3e3d --- /dev/null +++ b/storagev2/apis/api_fetch_object.go @@ -0,0 +1,121 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + fetchobject "github.com/qiniu/go-sdk/v7/storagev2/apis/fetch_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerFetchObjectRequest fetchobject.Request + +func (pp *innerFetchObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.ToEntry, ":", 2)[0], nil +} +func (path *innerFetchObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.FromUrl != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.FromUrl))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "FromUrl"} + } + if path.ToEntry != "" { + allSegments = append(allSegments, "to", base64.URLEncoding.EncodeToString([]byte(path.ToEntry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "ToEntry"} + } + if path.Host != "" { + allSegments = append(allSegments, "host", base64.URLEncoding.EncodeToString([]byte(path.Host))) + } + 
return allSegments, nil +} +func (request *innerFetchObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type FetchObjectRequest = fetchobject.Request +type FetchObjectResponse = fetchobject.Response + +// 从指定 URL 抓取指定名称的对象并存储到该空间中 +func (storage *Storage) FetchObject(ctx context.Context, request *FetchObjectRequest, options *Options) (*FetchObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerFetchObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceIo} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "fetch") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody FetchObjectResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil 
{ + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_async_fetch_task.go b/storagev2/apis/api_get_async_fetch_task.go new file mode 100644 index 00000000..baf8b936 --- /dev/null +++ b/storagev2/apis/api_get_async_fetch_task.go @@ -0,0 +1,105 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getasyncfetchtask "github.com/qiniu/go-sdk/v7/storagev2/apis/get_async_fetch_task" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerGetAsyncFetchTaskRequest getasyncfetchtask.Request + +func (query *innerGetAsyncFetchTaskRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Id != "" { + allQuery.Set("id", query.Id) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Id"} + } + return allQuery, nil +} +func (request *innerGetAsyncFetchTaskRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetAsyncFetchTaskRequest = getasyncfetchtask.Request +type GetAsyncFetchTaskResponse = getasyncfetchtask.Response + +// 查询异步抓取任务 +func (storage *Storage) GetAsyncFetchTask(ctx context.Context, request *GetAsyncFetchTaskRequest, options *Options) (*GetAsyncFetchTaskResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetAsyncFetchTaskRequest)(request) + serviceNames := []region.ServiceName{region.ServiceApi} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = 
append(pathSegments, "sisyphus", "fetch") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetAsyncFetchTaskResponse + if err := 
storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_bucket_cors_rules.go b/storagev2/apis/api_get_bucket_cors_rules.go new file mode 100644 index 00000000..89f5580e --- /dev/null +++ b/storagev2/apis/api_get_bucket_cors_rules.go @@ -0,0 +1,104 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbucketcorsrules "github.com/qiniu/go-sdk/v7/storagev2/apis/get_bucket_cors_rules" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerGetBucketCORSRulesRequest getbucketcorsrules.Request + +func (pp *innerGetBucketCORSRulesRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerGetBucketCORSRulesRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allSegments, nil +} +func (request *innerGetBucketCORSRulesRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketCORSRulesRequest = getbucketcorsrules.Request +type GetBucketCORSRulesResponse = getbucketcorsrules.Response + +// 获取空间的跨域规则 +func (storage *Storage) GetBucketCORSRules(ctx context.Context, request *GetBucketCORSRulesRequest, options *Options) (*GetBucketCORSRulesResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketCORSRulesRequest)(request) + serviceNames := 
[]region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "corsRules", "get") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketCORSRulesResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git 
a/storagev2/apis/api_get_bucket_domains.go b/storagev2/apis/api_get_bucket_domains.go new file mode 100644 index 00000000..05a3ebd7 --- /dev/null +++ b/storagev2/apis/api_get_bucket_domains.go @@ -0,0 +1,105 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbucketdomains "github.com/qiniu/go-sdk/v7/storagev2/apis/get_bucket_domains" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerGetBucketDomainsRequest getbucketdomains.Request + +func (query *innerGetBucketDomainsRequest) getBucketName(ctx context.Context) (string, error) { + return query.BucketName, nil +} +func (query *innerGetBucketDomainsRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.BucketName != "" { + allQuery.Set("tbl", query.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + return allQuery, nil +} +func (request *innerGetBucketDomainsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketDomainsRequest = getbucketdomains.Request +type GetBucketDomainsResponse = getbucketdomains.Response + +// 获取存储空间的域名列表 +func (storage *Storage) GetBucketDomains(ctx context.Context, request *GetBucketDomainsRequest, options *Options) (*GetBucketDomainsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketDomainsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, 
errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "v2", "domains") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketDomainsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_bucket_quota.go b/storagev2/apis/api_get_bucket_quota.go new file mode 100644 index 00000000..adfd12aa --- /dev/null +++ 
b/storagev2/apis/api_get_bucket_quota.go @@ -0,0 +1,104 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbucketquota "github.com/qiniu/go-sdk/v7/storagev2/apis/get_bucket_quota" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerGetBucketQuotaRequest getbucketquota.Request + +func (pp *innerGetBucketQuotaRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerGetBucketQuotaRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allSegments, nil +} +func (request *innerGetBucketQuotaRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketQuotaRequest = getbucketquota.Request +type GetBucketQuotaResponse = getbucketquota.Response + +// 获取用户存储空间配额限制 +func (storage *Storage) GetBucketQuota(ctx context.Context, request *GetBucketQuotaRequest, options *Options) (*GetBucketQuotaResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketQuotaRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "getbucketquota") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + 
pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketQuotaResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_bucket_rules.go b/storagev2/apis/api_get_bucket_rules.go new file mode 100644 index 00000000..0142cda0 --- /dev/null +++ b/storagev2/apis/api_get_bucket_rules.go @@ -0,0 +1,97 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbucketrules "github.com/qiniu/go-sdk/v7/storagev2/apis/get_bucket_rules" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerGetBucketRulesRequest getbucketrules.Request + +func (query *innerGetBucketRulesRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Bucket != "" { + allQuery.Set("bucket", query.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allQuery, nil +} +func (request *innerGetBucketRulesRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketRulesRequest = getbucketrules.Request +type GetBucketRulesResponse = getbucketrules.Response + +// 获取空间规则 +func (storage *Storage) GetBucketRules(ctx context.Context, request *GetBucketRulesRequest, options *Options) (*GetBucketRulesResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketRulesRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "rules", "get") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, 
Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketRulesResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_bucket_taggings.go b/storagev2/apis/api_get_bucket_taggings.go new file mode 100644 index 00000000..4830adb4 --- /dev/null +++ b/storagev2/apis/api_get_bucket_taggings.go @@ -0,0 +1,105 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbuckettaggings "github.com/qiniu/go-sdk/v7/storagev2/apis/get_bucket_taggings" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerGetBucketTaggingsRequest getbuckettaggings.Request + +func (query *innerGetBucketTaggingsRequest) getBucketName(ctx context.Context) (string, error) { + return query.BucketName, nil +} +func (query *innerGetBucketTaggingsRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.BucketName != "" { + allQuery.Set("bucket", query.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + return allQuery, nil +} +func (request *innerGetBucketTaggingsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketTaggingsRequest = getbuckettaggings.Request +type GetBucketTaggingsResponse = getbuckettaggings.Response + +// 查询指定的存储空间已设置的标签信息 +func (storage *Storage) GetBucketTaggings(ctx context.Context, request *GetBucketTaggingsRequest, options *Options) (*GetBucketTaggingsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketTaggingsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "bucketTagging") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, 
err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketTaggingsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_buckets.go b/storagev2/apis/api_get_buckets.go new file mode 100644 index 00000000..16f0a347 --- /dev/null +++ b/storagev2/apis/api_get_buckets.go @@ -0,0 +1,95 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbuckets "github.com/qiniu/go-sdk/v7/storagev2/apis/get_buckets" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerGetBucketsRequest getbuckets.Request + +func (query *innerGetBucketsRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Shared != "" { + allQuery.Set("shared", query.Shared) + } + return allQuery, nil +} +func (request *innerGetBucketsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketsRequest = getbuckets.Request +type GetBucketsResponse = getbuckets.Response + +// 获取拥有的所有存储空间列表 +func (storage *Storage) GetBuckets(ctx context.Context, request *GetBucketsRequest, options *Options) (*GetBucketsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if 
options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_buckets_v4.go b/storagev2/apis/api_get_buckets_v4.go new file mode 100644 index 00000000..64c30fc8 --- /dev/null +++ b/storagev2/apis/api_get_buckets_v4.go @@ -0,0 +1,102 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getbucketsv4 "github.com/qiniu/go-sdk/v7/storagev2/apis/get_buckets_v4" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerGetBucketsV4Request getbucketsv4.Request + +func (query *innerGetBucketsV4Request) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Region != "" { + allQuery.Set("region", query.Region) + } + if query.Limit != 0 { + allQuery.Set("limit", strconv.FormatInt(query.Limit, 10)) + } + if query.Marker != "" { + allQuery.Set("marker", query.Marker) + } + return allQuery, nil +} +func (request *innerGetBucketsV4Request) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetBucketsV4Request = getbucketsv4.Request +type GetBucketsV4Response = getbucketsv4.Response + +// 获取拥有的所有存储空间列表 +func (storage *Storage) GetBucketsV4(ctx context.Context, request *GetBucketsV4Request, options *Options) (*GetBucketsV4Response, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetBucketsV4Request)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + path := "/" + strings.Join(pathSegments, "/") + rawQuery := "apiVersion=v4&" + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, 
Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetBucketsV4Response + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_objects.go b/storagev2/apis/api_get_objects.go new file mode 100644 index 00000000..b28b2e53 --- /dev/null +++ b/storagev2/apis/api_get_objects.go @@ -0,0 +1,129 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getobjects "github.com/qiniu/go-sdk/v7/storagev2/apis/get_objects" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerGetObjectsRequest getobjects.Request + +func (query *innerGetObjectsRequest) getBucketName(ctx context.Context) (string, error) { + return query.Bucket, nil +} +func (query *innerGetObjectsRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Bucket != "" { + allQuery.Set("bucket", query.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if query.Marker != "" { + allQuery.Set("marker", query.Marker) + } + if query.Limit != 0 { + allQuery.Set("limit", strconv.FormatInt(query.Limit, 10)) + } + if query.Prefix != "" { + allQuery.Set("prefix", query.Prefix) + } + if query.Delimiter != "" { + allQuery.Set("delimiter", query.Delimiter) + } + if query.NeedParts { + allQuery.Set("needparts", strconv.FormatBool(query.NeedParts)) + } + return allQuery, nil +} +func (request *innerGetObjectsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetObjectsRequest = getobjects.Request +type GetObjectsResponse = getobjects.Response + +// 列举指定存储空间里的所有对象条目 +func (storage *Storage) GetObjects(ctx context.Context, request *GetObjectsRequest, options *Options) (*GetObjectsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetObjectsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRsf} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, 
errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "list") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey 
= creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetObjectsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_get_objects_v2.go b/storagev2/apis/api_get_objects_v2.go new file mode 100644 index 00000000..7be7bd33 --- /dev/null +++ b/storagev2/apis/api_get_objects_v2.go @@ -0,0 +1,129 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getobjectsv2 "github.com/qiniu/go-sdk/v7/storagev2/apis/get_objects_v2" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerGetObjectsV2Request getobjectsv2.Request + +func (query *innerGetObjectsV2Request) getBucketName(ctx context.Context) (string, error) { + return query.Bucket, nil +} +func (query *innerGetObjectsV2Request) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Bucket != "" { + allQuery.Set("bucket", query.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if query.Marker != "" { + allQuery.Set("marker", query.Marker) + } + if query.Limit != 0 { + allQuery.Set("limit", strconv.FormatInt(query.Limit, 10)) + } + if query.Prefix != "" { + allQuery.Set("prefix", query.Prefix) + } + if query.Delimiter != "" { + allQuery.Set("delimiter", query.Delimiter) + } + if query.NeedParts { + allQuery.Set("needparts", strconv.FormatBool(query.NeedParts)) + } + return allQuery, nil +} +func (request *innerGetObjectsV2Request) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return 
"", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetObjectsV2Request = getobjectsv2.Request +type GetObjectsV2Response = getobjectsv2.Response + +// 列举指定存储空间里的所有对象条目 +func (storage *Storage) GetObjectsV2(ctx context.Context, request *GetObjectsV2Request, options *Options) (*GetObjectsV2Response, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetObjectsV2Request)(request) + serviceNames := []region.ServiceName{region.ServiceRsf} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "v2", "list") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, 
&queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &GetObjectsV2Response{Body: resp.Body}, nil +} diff --git a/storagev2/apis/api_get_regions.go b/storagev2/apis/api_get_regions.go new file mode 100644 index 00000000..19486907 --- /dev/null +++ b/storagev2/apis/api_get_regions.go @@ -0,0 +1,82 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + getregions "github.com/qiniu/go-sdk/v7/storagev2/apis/get_regions" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerGetRegionsRequest getregions.Request + +func (request *innerGetRegionsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type GetRegionsRequest = getregions.Request +type GetRegionsResponse = getregions.Response + +// 获取所有区域信息 +func (storage *Storage) GetRegions(ctx context.Context, request *GetRegionsRequest, options *Options) (*GetRegionsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerGetRegionsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "regions") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints 
= bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody GetRegionsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_modify_object_life_cycle.go b/storagev2/apis/api_modify_object_life_cycle.go new file mode 100644 index 00000000..b60c1370 --- /dev/null +++ b/storagev2/apis/api_modify_object_life_cycle.go @@ -0,0 +1,129 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + modifyobjectlifecycle "github.com/qiniu/go-sdk/v7/storagev2/apis/modify_object_life_cycle" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerModifyObjectLifeCycleRequest modifyobjectlifecycle.Request + +func (pp *innerModifyObjectLifeCycleRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerModifyObjectLifeCycleRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + if path.ToIaAfterDays != 0 { + allSegments = append(allSegments, "toIAAfterDays", strconv.FormatInt(path.ToIaAfterDays, 10)) + } + if path.ToArchiveAfterDays != 0 { + allSegments = append(allSegments, "toArchiveAfterDays", strconv.FormatInt(path.ToArchiveAfterDays, 10)) + } + if path.ToDeepArchiveAfterDays != 0 { + allSegments = append(allSegments, "toDeepArchiveAfterDays", strconv.FormatInt(path.ToDeepArchiveAfterDays, 10)) + } + if path.ToArchiveIrAfterDays != 0 { + allSegments = append(allSegments, "toArchiveIRAfterDays", strconv.FormatInt(path.ToArchiveIrAfterDays, 10)) + } + if path.DeleteAfterDays != 0 { + allSegments = append(allSegments, "deleteAfterDays", strconv.FormatInt(path.DeleteAfterDays, 10)) + } + return allSegments, nil +} +func (request *innerModifyObjectLifeCycleRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type 
ModifyObjectLifeCycleRequest = modifyobjectlifecycle.Request +type ModifyObjectLifeCycleResponse = modifyobjectlifecycle.Response + +// 修改已上传对象的生命周期 +func (storage *Storage) ModifyObjectLifeCycle(ctx context.Context, request *ModifyObjectLifeCycleRequest, options *Options) (*ModifyObjectLifeCycleResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerModifyObjectLifeCycleRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "lifecycle") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, 
&queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &ModifyObjectLifeCycleResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_modify_object_metadata.go b/storagev2/apis/api_modify_object_metadata.go new file mode 100644 index 00000000..efb61536 --- /dev/null +++ b/storagev2/apis/api_modify_object_metadata.go @@ -0,0 +1,123 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + modifyobjectmetadata "github.com/qiniu/go-sdk/v7/storagev2/apis/modify_object_metadata" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerModifyObjectMetadataRequest modifyobjectmetadata.Request + +func (pp *innerModifyObjectMetadataRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerModifyObjectMetadataRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + if path.MimeType != "" { + allSegments = append(allSegments, "mime", base64.URLEncoding.EncodeToString([]byte(path.MimeType))) + } + if path.Condition != "" { + allSegments = append(allSegments, "cond", base64.URLEncoding.EncodeToString([]byte(path.Condition))) + } + for key, value := range path.MetaData { + allSegments = append(allSegments, key) + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(value))) + } + return allSegments, nil +} +func (request *innerModifyObjectMetadataRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type ModifyObjectMetadataRequest = modifyobjectmetadata.Request +type ModifyObjectMetadataResponse = modifyobjectmetadata.Response + +// 修改文件元信息 +func (storage *Storage) ModifyObjectMetadata(ctx context.Context, request *ModifyObjectMetadataRequest, options *Options) (*ModifyObjectMetadataResponse, error) { + if options == nil 
{ + options = &Options{} + } + innerRequest := (*innerModifyObjectMetadataRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "chgm") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + 
return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &ModifyObjectMetadataResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_modify_object_status.go b/storagev2/apis/api_modify_object_status.go new file mode 100644 index 00000000..d158b936 --- /dev/null +++ b/storagev2/apis/api_modify_object_status.go @@ -0,0 +1,115 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + modifyobjectstatus "github.com/qiniu/go-sdk/v7/storagev2/apis/modify_object_status" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerModifyObjectStatusRequest modifyobjectstatus.Request + +func (pp *innerModifyObjectStatusRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerModifyObjectStatusRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + allSegments = append(allSegments, "status", strconv.FormatInt(path.Status, 10)) + return allSegments, nil +} +func (request *innerModifyObjectStatusRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := 
request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type ModifyObjectStatusRequest = modifyobjectstatus.Request +type ModifyObjectStatusResponse = modifyobjectstatus.Response + +// 修改文件的存储状态,即禁用状态和启用状态间的的互相转换 +func (storage *Storage) ModifyObjectStatus(ctx context.Context, request *ModifyObjectStatusRequest, options *Options) (*ModifyObjectStatusResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerModifyObjectStatusRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "chstatus") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig 
!= nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &ModifyObjectStatusResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_move_object.go b/storagev2/apis/api_move_object.go new file mode 100644 index 00000000..8a0a9975 --- /dev/null +++ b/storagev2/apis/api_move_object.go @@ -0,0 +1,122 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + moveobject "github.com/qiniu/go-sdk/v7/storagev2/apis/move_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerMoveObjectRequest moveobject.Request + +func (pp *innerMoveObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.SrcEntry, ":", 2)[0], nil +} +func (path *innerMoveObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.SrcEntry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.SrcEntry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "SrcEntry"} + } + if path.DestEntry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.DestEntry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "DestEntry"} + } + if path.IsForce { + allSegments = append(allSegments, "force", strconv.FormatBool(path.IsForce)) + } + return allSegments, nil +} +func (request *innerMoveObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type MoveObjectRequest = moveobject.Request +type MoveObjectResponse = moveobject.Response + +// 将源空间的指定对象移动到目标空间,或在同一空间内对对象重命名 +func (storage *Storage) MoveObject(ctx context.Context, request *MoveObjectRequest, options *Options) (*MoveObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerMoveObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return 
nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "move") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + 
accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &MoveObjectResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_post_object.go b/storagev2/apis/api_post_object.go new file mode 100644 index 00000000..cf8ead35 --- /dev/null +++ b/storagev2/apis/api_post_object.go @@ -0,0 +1,134 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + postobject "github.com/qiniu/go-sdk/v7/storagev2/apis/post_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerPostObjectRequest postobject.Request + +func (form *innerPostObjectRequest) getBucketName(ctx context.Context) (string, error) { + putPolicy, err := form.UploadToken.GetPutPolicy(ctx) + if err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } +} +func (form *innerPostObjectRequest) build(ctx context.Context) (*httpclient.MultipartForm, error) { + multipartForm := new(httpclient.MultipartForm) + if form.ObjectName != nil { + multipartForm.SetValue("key", *form.ObjectName) + } + if form.UploadToken != nil { + upToken, err := form.UploadToken.GetUpToken(ctx) + if err != nil { + return nil, err + } + multipartForm.SetValue("token", upToken) + } else { + return nil, errors.MissingRequiredFieldError{Name: "UploadToken"} + } + if form.Crc32 != 0 { + multipartForm.SetValue("crc32", strconv.FormatInt(form.Crc32, 10)) + } + if form.File.Data != nil { + if form.File.Name == "" { + return nil, errors.MissingRequiredFieldError{Name: "File.Name"} + } + multipartForm.SetFile("file", form.File.Name, form.File.Data) + } else { + return nil, errors.MissingRequiredFieldError{Name: "File"} + } + for 
key, value := range form.CustomData { + multipartForm.SetValue(key, value) + } + return multipartForm, nil +} +func (request *innerPostObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UploadToken != nil { + if accessKey, err := request.UploadToken.GetAccessKey(ctx); err != nil { + return "", err + } else { + return accessKey, nil + } + } + return "", nil +} + +type PostObjectRequest = postobject.Request +type PostObjectResponse = postobject.Response + +// 在一次 HTTP 会话中上传单一的一个文件 +func (storage *Storage) PostObject(ctx context.Context, request *PostObjectRequest, options *Options) (*PostObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerPostObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + var pathSegments []string + pathSegments = append(pathSegments, "") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := innerRequest.build(ctx) + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, BufferResponse: true, RequestBody: httpclient.GetMultipartFormRequestBody(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + 
queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + respBody := PostObjectResponse{Body: innerRequest.ResponseBody} + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_prefetch_object.go b/storagev2/apis/api_prefetch_object.go new file mode 100644 index 00000000..62cafbe1 --- /dev/null +++ b/storagev2/apis/api_prefetch_object.go @@ -0,0 +1,113 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + prefetchobject "github.com/qiniu/go-sdk/v7/storagev2/apis/prefetch_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerPrefetchObjectRequest prefetchobject.Request + +func (pp *innerPrefetchObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerPrefetchObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + return allSegments, nil +} +func (request *innerPrefetchObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type PrefetchObjectRequest = prefetchobject.Request +type PrefetchObjectResponse = prefetchobject.Response + +// 对于设置了镜像存储的空间,从镜像源站抓取指定名称的对象并存储到该空间中,如果该空间中已存在该名称的对象,则会将镜像源站的对象覆盖空间中相同名称的对象 +func (storage *Storage) PrefetchObject(ctx context.Context, request *PrefetchObjectRequest, options *Options) (*PrefetchObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerPrefetchObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceIo} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "prefetch") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } 
else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, 
err + } + return &PrefetchObjectResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_restore_archived_object.go b/storagev2/apis/api_restore_archived_object.go new file mode 100644 index 00000000..1d0024ec --- /dev/null +++ b/storagev2/apis/api_restore_archived_object.go @@ -0,0 +1,119 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + restorearchivedobject "github.com/qiniu/go-sdk/v7/storagev2/apis/restore_archived_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerRestoreArchivedObjectRequest restorearchivedobject.Request + +func (pp *innerRestoreArchivedObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerRestoreArchivedObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + if path.FreezeAfterDays != 0 { + allSegments = append(allSegments, "freezeAfterDays", strconv.FormatInt(path.FreezeAfterDays, 10)) + } else { + return nil, errors.MissingRequiredFieldError{Name: "FreezeAfterDays"} + } + return allSegments, nil +} +func (request *innerRestoreArchivedObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type RestoreArchivedObjectRequest = restorearchivedobject.Request +type RestoreArchivedObjectResponse = restorearchivedobject.Response + +// 
解冻归档存储类型的文件,可设置解冻有效期1~7天,完成解冻任务通常需要1~5分钟 +func (storage *Storage) RestoreArchivedObject(ctx context.Context, request *RestoreArchivedObjectRequest, options *Options) (*RestoreArchivedObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerRestoreArchivedObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "restoreAr") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := 
options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &RestoreArchivedObjectResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_resumable_upload_v1_bput.go b/storagev2/apis/api_resumable_upload_v1_bput.go new file mode 100644 index 00000000..9370e70b --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v1_bput.go @@ -0,0 +1,120 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + resumableuploadv1bput "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v1_bput" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerResumableUploadV1BputRequest resumableuploadv1bput.Request + +func (request *innerResumableUploadV1BputRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV1BputRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Ctx != "" { + allSegments = append(allSegments, path.Ctx) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Ctx"} + } + allSegments = append(allSegments, strconv.FormatInt(path.ChunkOffset, 10)) + return allSegments, nil +} +func (request *innerResumableUploadV1BputRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV1BputRequest = resumableuploadv1bput.Request +type ResumableUploadV1BputResponse = resumableuploadv1bput.Response + +// 上传指定块的一片数据,具体数据量可根据现场环境调整,同一块的每片数据必须串行上传 +func (storage *Storage) ResumableUploadV1Bput(ctx context.Context, request *ResumableUploadV1BputRequest, options *Options) (*ResumableUploadV1BputResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV1BputRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "bput") + if segments, err := 
innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body := innerRequest.Body + if body == nil { + return nil, errors.MissingRequiredFieldError{Name: "Body"} + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { 
+ accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody ResumableUploadV1BputResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_resumable_upload_v1_make_block.go b/storagev2/apis/api_resumable_upload_v1_make_block.go new file mode 100644 index 00000000..3560f940 --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v1_make_block.go @@ -0,0 +1,119 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + resumableuploadv1makeblock "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v1_make_block" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerResumableUploadV1MakeBlockRequest resumableuploadv1makeblock.Request + +func (request *innerResumableUploadV1MakeBlockRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV1MakeBlockRequest) buildPath() ([]string, error) { + var allSegments []string + if path.BlockSize != 0 { + allSegments = append(allSegments, strconv.FormatInt(path.BlockSize, 10)) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BlockSize"} + } + return allSegments, nil +} +func (request *innerResumableUploadV1MakeBlockRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV1MakeBlockRequest = resumableuploadv1makeblock.Request +type 
ResumableUploadV1MakeBlockResponse = resumableuploadv1makeblock.Response + +// 为后续分片上传创建一个新的块,同时上传第一片数据 +func (storage *Storage) ResumableUploadV1MakeBlock(ctx context.Context, request *ResumableUploadV1MakeBlockRequest, options *Options) (*ResumableUploadV1MakeBlockResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV1MakeBlockRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "mkblk") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body := innerRequest.Body + if body == nil { + return nil, errors.MissingRequiredFieldError{Name: "Body"} + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = 
hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody ResumableUploadV1MakeBlockResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_resumable_upload_v1_make_file.go b/storagev2/apis/api_resumable_upload_v1_make_file.go new file mode 100644 index 00000000..5d41632f --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v1_make_file.go @@ -0,0 +1,129 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + resumableuploadv1makefile "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v1_make_file" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerResumableUploadV1MakeFileRequest resumableuploadv1makefile.Request + +func (request *innerResumableUploadV1MakeFileRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV1MakeFileRequest) buildPath() ([]string, error) { + var allSegments []string + allSegments = append(allSegments, strconv.FormatInt(path.Size, 10)) + if path.ObjectName != nil { + allSegments = append(allSegments, "key", base64.URLEncoding.EncodeToString([]byte(*path.ObjectName))) + } + if path.FileName != "" { + allSegments = append(allSegments, "fname", base64.URLEncoding.EncodeToString([]byte(path.FileName))) + } + if path.MimeType != "" { + allSegments = append(allSegments, "mimeType", base64.URLEncoding.EncodeToString([]byte(path.MimeType))) + } + for key, value := range path.CustomData { + allSegments = append(allSegments, key) + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(value))) + } + return allSegments, nil +} +func (request *innerResumableUploadV1MakeFileRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV1MakeFileRequest = resumableuploadv1makefile.Request +type ResumableUploadV1MakeFileResponse = resumableuploadv1makefile.Response + +// 将上传好的所有数据块按指定顺序合并成一个资源文件 +func (storage *Storage) ResumableUploadV1MakeFile(ctx context.Context, 
request *ResumableUploadV1MakeFileRequest, options *Options) (*ResumableUploadV1MakeFileResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV1MakeFileRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "mkfile") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body := innerRequest.Body + if body == nil { + return nil, errors.MissingRequiredFieldError{Name: "Body"} + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := 
options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + respBody := ResumableUploadV1MakeFileResponse{Body: innerRequest.ResponseBody} + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go b/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go new file mode 100644 index 00000000..7f14bdb0 --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v2_abort_multipart_upload.go @@ -0,0 +1,125 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + resumableuploadv2abortmultipartupload "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_abort_multipart_upload" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerResumableUploadV2AbortMultipartUploadRequest resumableuploadv2abortmultipartupload.Request + +func (request *innerResumableUploadV2AbortMultipartUploadRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV2AbortMultipartUploadRequest) buildPath() ([]string, error) { + var allSegments []string + if path.BucketName != "" { + allSegments = append(allSegments, path.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + if path.ObjectName != nil { + allSegments = append(allSegments, "objects", base64.URLEncoding.EncodeToString([]byte(*path.ObjectName))) + } else { + allSegments = append(allSegments, "objects", "~") + } + if path.UploadId != "" { + allSegments = append(allSegments, "uploads", path.UploadId) + } else { + return nil, errors.MissingRequiredFieldError{Name: "UploadId"} + } + return allSegments, nil +} +func (request *innerResumableUploadV2AbortMultipartUploadRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV2AbortMultipartUploadRequest = resumableuploadv2abortmultipartupload.Request +type ResumableUploadV2AbortMultipartUploadResponse = resumableuploadv2abortmultipartupload.Response + +// 根据 UploadId 终止 Multipart Upload +func (storage *Storage) 
ResumableUploadV2AbortMultipartUpload(ctx context.Context, request *ResumableUploadV2AbortMultipartUploadRequest, options *Options) (*ResumableUploadV2AbortMultipartUploadResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV2AbortMultipartUploadRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "DELETE", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err 
= innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &ResumableUploadV2AbortMultipartUploadResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go b/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go new file mode 100644 index 00000000..86e74403 --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v2_complete_multipart_upload.go @@ -0,0 +1,136 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + "encoding/json" + resumableuploadv2completemultipartupload "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_complete_multipart_upload" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerResumableUploadV2CompleteMultipartUploadRequest resumableuploadv2completemultipartupload.Request + +func (request *innerResumableUploadV2CompleteMultipartUploadRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV2CompleteMultipartUploadRequest) buildPath() ([]string, error) { + var allSegments []string + if path.BucketName != "" { + allSegments = append(allSegments, path.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + if path.ObjectName != nil { + allSegments = append(allSegments, "objects", base64.URLEncoding.EncodeToString([]byte(*path.ObjectName))) + } else { + allSegments = append(allSegments, "objects", "~") + } + if path.UploadId != "" { + allSegments = append(allSegments, "uploads", path.UploadId) + } else { + return nil, errors.MissingRequiredFieldError{Name: "UploadId"} + } + return allSegments, nil +} +func (j *innerResumableUploadV2CompleteMultipartUploadRequest) MarshalJSON() ([]byte, error) { + return json.Marshal((*resumableuploadv2completemultipartupload.Request)(j)) +} +func (j *innerResumableUploadV2CompleteMultipartUploadRequest) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, (*resumableuploadv2completemultipartupload.Request)(j)) +} +func (request *innerResumableUploadV2CompleteMultipartUploadRequest) getAccessKey(ctx context.Context) (string, 
error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV2CompleteMultipartUploadRequest = resumableuploadv2completemultipartupload.Request +type ResumableUploadV2CompleteMultipartUploadResponse = resumableuploadv2completemultipartupload.Response + +// 在将所有数据分片都上传完成后,必须调用 completeMultipartUpload API 来完成整个文件的 Multipart Upload。用户需要提供有效数据的分片列表(包括 PartNumber 和调用 uploadPart API 服务端返回的 Etag)。服务端收到用户提交的分片列表后,会逐一验证每个数据分片的有效性。当所有的数据分片验证通过后,会把这些数据分片组合成一个完整的对象 +func (storage *Storage) ResumableUploadV2CompleteMultipartUpload(ctx context.Context, request *ResumableUploadV2CompleteMultipartUploadRequest, options *Options) (*ResumableUploadV2CompleteMultipartUploadResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV2CompleteMultipartUploadRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := httpclient.GetJsonRequestBody(&innerRequest) + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: body} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + respBody := 
ResumableUploadV2CompleteMultipartUploadResponse{Body: innerRequest.ResponseBody} + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go b/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go new file mode 100644 index 00000000..290aeda5 --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v2_initiate_multipart_upload.go @@ -0,0 +1,121 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + resumableuploadv2initiatemultipartupload "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_initiate_multipart_upload" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerResumableUploadV2InitiateMultipartUploadRequest resumableuploadv2initiatemultipartupload.Request + +func (request *innerResumableUploadV2InitiateMultipartUploadRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV2InitiateMultipartUploadRequest) buildPath() ([]string, error) { + var allSegments []string + if path.BucketName != "" { + allSegments = append(allSegments, path.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + if path.ObjectName != nil { + allSegments = append(allSegments, "objects", base64.URLEncoding.EncodeToString([]byte(*path.ObjectName))) + } else { + allSegments = append(allSegments, "objects", "~") + } + return allSegments, nil +} +func (request *innerResumableUploadV2InitiateMultipartUploadRequest) getAccessKey(ctx 
context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV2InitiateMultipartUploadRequest = resumableuploadv2initiatemultipartupload.Request +type ResumableUploadV2InitiateMultipartUploadResponse = resumableuploadv2initiatemultipartupload.Response + +// 使用 Multipart Upload 方式上传数据前,必须先调用 API 来获取一个全局唯一的 UploadId,后续的块数据通过 uploadPart API 上传,整个文件完成 completeMultipartUpload API,已经上传块的删除 abortMultipartUpload API 都依赖该 UploadId +func (storage *Storage) ResumableUploadV2InitiateMultipartUpload(ctx context.Context, request *ResumableUploadV2InitiateMultipartUploadRequest, options *Options) (*ResumableUploadV2InitiateMultipartUploadResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV2InitiateMultipartUploadRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + pathSegments = append(pathSegments, "uploads") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody ResumableUploadV2InitiateMultipartUploadResponse + if err := 
storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_resumable_upload_v2_list_parts.go b/storagev2/apis/api_resumable_upload_v2_list_parts.go new file mode 100644 index 00000000..e96e517b --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v2_list_parts.go @@ -0,0 +1,142 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + resumableuploadv2listparts "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_list_parts" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerResumableUploadV2ListPartsRequest resumableuploadv2listparts.Request + +func (request *innerResumableUploadV2ListPartsRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV2ListPartsRequest) buildPath() ([]string, error) { + var allSegments []string + if path.BucketName != "" { + allSegments = append(allSegments, path.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + if path.ObjectName != nil { + allSegments = append(allSegments, "objects", base64.URLEncoding.EncodeToString([]byte(*path.ObjectName))) + } else { + allSegments = append(allSegments, "objects", "~") + } + if path.UploadId != "" { + allSegments = append(allSegments, "uploads", path.UploadId) + } else { + return nil, errors.MissingRequiredFieldError{Name: "UploadId"} + } + return allSegments, nil +} +func (query *innerResumableUploadV2ListPartsRequest) buildQuery() (url.Values, error) { + allQuery := 
make(url.Values) + if query.MaxParts != 0 { + allQuery.Set("max-parts", strconv.FormatInt(query.MaxParts, 10)) + } + if query.PartNumberMarker != 0 { + allQuery.Set("part-number_marker", strconv.FormatInt(query.PartNumberMarker, 10)) + } + return allQuery, nil +} +func (request *innerResumableUploadV2ListPartsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV2ListPartsRequest = resumableuploadv2listparts.Request +type ResumableUploadV2ListPartsResponse = resumableuploadv2listparts.Response + +// 列举出指定 UploadId 所属任务所有已经上传成功的分片 +func (storage *Storage) ResumableUploadV2ListParts(ctx context.Context, request *ResumableUploadV2ListPartsRequest, options *Options) (*ResumableUploadV2ListPartsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV2ListPartsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, UpToken: innerRequest.UpToken, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody 
ResumableUploadV2ListPartsResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_resumable_upload_v2_upload_part.go b/storagev2/apis/api_resumable_upload_v2_upload_part.go new file mode 100644 index 00000000..c5e3444d --- /dev/null +++ b/storagev2/apis/api_resumable_upload_v2_upload_part.go @@ -0,0 +1,147 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + resumableuploadv2uploadpart "github.com/qiniu/go-sdk/v7/storagev2/apis/resumable_upload_v2_upload_part" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/http" + "strconv" + "strings" +) + +type innerResumableUploadV2UploadPartRequest resumableuploadv2uploadpart.Request + +func (request *innerResumableUploadV2UploadPartRequest) getBucketName(ctx context.Context) (string, error) { + if request.UpToken != nil { + if putPolicy, err := request.UpToken.GetPutPolicy(ctx); err != nil { + return "", err + } else { + return putPolicy.GetBucketName() + } + } + return "", nil +} +func (path *innerResumableUploadV2UploadPartRequest) buildPath() ([]string, error) { + var allSegments []string + if path.BucketName != "" { + allSegments = append(allSegments, path.BucketName) + } else { + return nil, errors.MissingRequiredFieldError{Name: "BucketName"} + } + if path.ObjectName != nil { + allSegments = append(allSegments, "objects", base64.URLEncoding.EncodeToString([]byte(*path.ObjectName))) + } else { + allSegments = append(allSegments, "objects", "~") + } + if path.UploadId != "" { + allSegments = append(allSegments, "uploads", path.UploadId) + } else { + return nil, errors.MissingRequiredFieldError{Name: "UploadId"} + } + if path.PartNumber != 0 { + allSegments = append(allSegments, 
strconv.FormatInt(path.PartNumber, 10)) + } else { + return nil, errors.MissingRequiredFieldError{Name: "PartNumber"} + } + return allSegments, nil +} +func (headers *innerResumableUploadV2UploadPartRequest) buildHeaders() (http.Header, error) { + allHeaders := make(http.Header) + if headers.Md5 != "" { + allHeaders.Set("Content-MD5", headers.Md5) + } + return allHeaders, nil +} +func (request *innerResumableUploadV2UploadPartRequest) getAccessKey(ctx context.Context) (string, error) { + if request.UpToken != nil { + return request.UpToken.GetAccessKey(ctx) + } + return "", nil +} + +type ResumableUploadV2UploadPartRequest = resumableuploadv2uploadpart.Request +type ResumableUploadV2UploadPartResponse = resumableuploadv2uploadpart.Response + +// 初始化一个 Multipart Upload 任务之后,可以根据指定的对象名称和 UploadId 来分片上传数据 +func (storage *Storage) ResumableUploadV2UploadPart(ctx context.Context, request *ResumableUploadV2UploadPartRequest, options *Options) (*ResumableUploadV2UploadPartResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerResumableUploadV2UploadPartRequest)(request) + serviceNames := []region.ServiceName{region.ServiceUp} + if innerRequest.UpToken == nil { + return nil, errors.MissingRequiredFieldError{Name: "UpToken"} + } + headers, err := innerRequest.buildHeaders() + if err != nil { + return nil, err + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) 
+ } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body := innerRequest.Body + if body == nil { + return nil, errors.MissingRequiredFieldError{Name: "Body"} + } + req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, Header: headers, UpToken: innerRequest.UpToken, BufferResponse: true, RequestBody: httpclient.GetRequestBodyFromReadSeekCloser(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = 
query.Query(accessKey, bucketName) + } + } + } + var respBody ResumableUploadV2UploadPartResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_set_bucket_access_mode.go b/storagev2/apis/api_set_bucket_access_mode.go new file mode 100644 index 00000000..c0b38801 --- /dev/null +++ b/storagev2/apis/api_set_bucket_access_mode.go @@ -0,0 +1,106 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketaccessmode "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_access_mode" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerSetBucketAccessModeRequest setbucketaccessmode.Request + +func (pp *innerSetBucketAccessModeRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerSetBucketAccessModeRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + allSegments = append(allSegments, "mode", strconv.FormatInt(path.Mode, 10)) + return allSegments, nil +} +func (request *innerSetBucketAccessModeRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketAccessModeRequest = setbucketaccessmode.Request +type SetBucketAccessModeResponse = setbucketaccessmode.Response + +// 设置存储空间的原图保护 +func (storage *Storage) SetBucketAccessMode(ctx context.Context, request 
*SetBucketAccessModeRequest, options *Options) (*SetBucketAccessModeResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerSetBucketAccessModeRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "accessMode") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + 
} + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketAccessModeResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_set_bucket_cors_rules.go b/storagev2/apis/api_set_bucket_cors_rules.go new file mode 100644 index 00000000..3aade04a --- /dev/null +++ b/storagev2/apis/api_set_bucket_cors_rules.go @@ -0,0 +1,115 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/json" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketcorsrules "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_cors_rules" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerSetBucketCORSRulesRequest setbucketcorsrules.Request + +func (pp *innerSetBucketCORSRulesRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerSetBucketCORSRulesRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allSegments, nil +} +func (j *innerSetBucketCORSRulesRequest) MarshalJSON() ([]byte, error) { + return json.Marshal((*setbucketcorsrules.Request)(j)) +} +func (j *innerSetBucketCORSRulesRequest) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, (*setbucketcorsrules.Request)(j)) +} +func (request *innerSetBucketCORSRulesRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketCORSRulesRequest = setbucketcorsrules.Request +type SetBucketCORSRulesResponse = setbucketcorsrules.Response + 
+// 设置空间的跨域规则 +func (storage *Storage) SetBucketCORSRules(ctx context.Context, request *SetBucketCORSRulesRequest, options *Options) (*SetBucketCORSRulesResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerSetBucketCORSRulesRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "corsRules", "set") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := httpclient.GetJsonRequestBody(&innerRequest) + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := 
credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketCORSRulesResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_set_bucket_max_age.go b/storagev2/apis/api_set_bucket_max_age.go new file mode 100644 index 00000000..cb85ce31 --- /dev/null +++ b/storagev2/apis/api_set_bucket_max_age.go @@ -0,0 +1,107 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketmaxage "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_max_age" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerSetBucketMaxAgeRequest setbucketmaxage.Request + +func (query *innerSetBucketMaxAgeRequest) getBucketName(ctx context.Context) (string, error) { + return query.Bucket, nil +} +func (query *innerSetBucketMaxAgeRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Bucket != "" { + allQuery.Set("bucket", query.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + allQuery.Set("maxAge", strconv.FormatInt(query.MaxAge, 10)) + return allQuery, nil +} +func (request *innerSetBucketMaxAgeRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketMaxAgeRequest = setbucketmaxage.Request +type SetBucketMaxAgeResponse = setbucketmaxage.Response + +// 设置存储空间的 
cache-control: max-age 响应头 +func (storage *Storage) SetBucketMaxAge(ctx context.Context, request *SetBucketMaxAgeRequest, options *Options) (*SetBucketMaxAgeResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerSetBucketMaxAgeRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "maxAge") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { 
+ req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketMaxAgeResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_set_bucket_private.go b/storagev2/apis/api_set_bucket_private.go new file mode 100644 index 00000000..29430f2a --- /dev/null +++ b/storagev2/apis/api_set_bucket_private.go @@ -0,0 +1,106 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketprivate "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_private" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerSetBucketPrivateRequest setbucketprivate.Request + +func (form *innerSetBucketPrivateRequest) getBucketName(ctx context.Context) (string, error) { + return form.Bucket, nil +} +func (form *innerSetBucketPrivateRequest) build() (url.Values, error) { + formValues := make(url.Values) + if form.Bucket != "" { + formValues.Set("bucket", form.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + formValues.Set("private", strconv.FormatInt(form.IsPrivate, 10)) + return formValues, nil +} +func (request *innerSetBucketPrivateRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketPrivateRequest = setbucketprivate.Request +type SetBucketPrivateResponse = setbucketprivate.Response + +// 设置存储空间的访问权限 +func (storage *Storage) SetBucketPrivate(ctx context.Context, request *SetBucketPrivateRequest, options *Options) (*SetBucketPrivateResponse, error) { + if options == 
nil { + options = &Options{} + } + innerRequest := (*innerSetBucketPrivateRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "private") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := innerRequest.build() + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketPrivateResponse{}, 
resp.Body.Close() +} diff --git a/storagev2/apis/api_set_bucket_quota.go b/storagev2/apis/api_set_bucket_quota.go new file mode 100644 index 00000000..90cd1d99 --- /dev/null +++ b/storagev2/apis/api_set_bucket_quota.go @@ -0,0 +1,111 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketquota "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_quota" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerSetBucketQuotaRequest setbucketquota.Request + +func (pp *innerSetBucketQuotaRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerSetBucketQuotaRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if path.Size != 0 { + allSegments = append(allSegments, "size", strconv.FormatInt(path.Size, 10)) + } + if path.Count != 0 { + allSegments = append(allSegments, "count", strconv.FormatInt(path.Count, 10)) + } + return allSegments, nil +} +func (request *innerSetBucketQuotaRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketQuotaRequest = setbucketquota.Request +type SetBucketQuotaResponse = setbucketquota.Response + +// 设置用户存储空间配额限制 +func (storage *Storage) SetBucketQuota(ctx context.Context, request *SetBucketQuotaRequest, options *Options) (*SetBucketQuotaResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := 
(*innerSetBucketQuotaRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "setbucketquota") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketQuotaResponse{}, resp.Body.Close() +} diff 
--git a/storagev2/apis/api_set_bucket_remark.go b/storagev2/apis/api_set_bucket_remark.go new file mode 100644 index 00000000..ae463297 --- /dev/null +++ b/storagev2/apis/api_set_bucket_remark.go @@ -0,0 +1,115 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/json" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketremark "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_remark" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerSetBucketRemarkRequest setbucketremark.Request + +func (pp *innerSetBucketRemarkRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerSetBucketRemarkRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allSegments, nil +} +func (j *innerSetBucketRemarkRequest) MarshalJSON() ([]byte, error) { + return json.Marshal((*setbucketremark.Request)(j)) +} +func (j *innerSetBucketRemarkRequest) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, (*setbucketremark.Request)(j)) +} +func (request *innerSetBucketRemarkRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketRemarkRequest = setbucketremark.Request +type SetBucketRemarkResponse = setbucketremark.Response + +// 设置空间备注 +func (storage *Storage) SetBucketRemark(ctx context.Context, request *SetBucketRemarkRequest, options *Options) (*SetBucketRemarkResponse, error) { + if options == nil { + options = &Options{} + } + 
innerRequest := (*innerSetBucketRemarkRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "buckets") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + rawQuery := "remark&" + body, err := httpclient.GetJsonRequestBody(&innerRequest) + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := 
storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketRemarkResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_set_bucket_taggings.go b/storagev2/apis/api_set_bucket_taggings.go new file mode 100644 index 00000000..cd02ce46 --- /dev/null +++ b/storagev2/apis/api_set_bucket_taggings.go @@ -0,0 +1,116 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/json" + auth "github.com/qiniu/go-sdk/v7/auth" + setbuckettaggings "github.com/qiniu/go-sdk/v7/storagev2/apis/set_bucket_taggings" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strings" +) + +type innerSetBucketTaggingsRequest setbuckettaggings.Request + +func (query *innerSetBucketTaggingsRequest) getBucketName(ctx context.Context) (string, error) { + return query.Bucket, nil +} +func (query *innerSetBucketTaggingsRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.Bucket != "" { + allQuery.Set("bucket", query.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + return allQuery, nil +} +func (j *innerSetBucketTaggingsRequest) MarshalJSON() ([]byte, error) { + return json.Marshal((*setbuckettaggings.Request)(j)) +} +func (j *innerSetBucketTaggingsRequest) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, (*setbuckettaggings.Request)(j)) +} +func (request *innerSetBucketTaggingsRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketTaggingsRequest = setbuckettaggings.Request +type SetBucketTaggingsResponse = setbuckettaggings.Response + +// 设置存储空间的标签列表,包括新增和修改 +func 
(storage *Storage) SetBucketTaggings(ctx context.Context, request *SetBucketTaggingsRequest, options *Options) (*SetBucketTaggingsResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerSetBucketTaggingsRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "bucketTagging") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + body, err := httpclient.GetJsonRequestBody(&innerRequest) + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "PUT", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: body} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if 
creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketTaggingsResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_set_buckets_mirror.go b/storagev2/apis/api_set_buckets_mirror.go new file mode 100644 index 00000000..3f861f54 --- /dev/null +++ b/storagev2/apis/api_set_buckets_mirror.go @@ -0,0 +1,115 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + setbucketsmirror "github.com/qiniu/go-sdk/v7/storagev2/apis/set_buckets_mirror" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strings" +) + +type innerSetBucketsMirrorRequest setbucketsmirror.Request + +func (pp *innerSetBucketsMirrorRequest) getBucketName(ctx context.Context) (string, error) { + return pp.Bucket, nil +} +func (path *innerSetBucketsMirrorRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Bucket != "" { + allSegments = append(allSegments, path.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if path.SrcSiteUrl != "" { + allSegments = append(allSegments, "from", base64.URLEncoding.EncodeToString([]byte(path.SrcSiteUrl))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "SrcSiteUrl"} + } + if path.Host != "" { + allSegments = append(allSegments, "host", base64.URLEncoding.EncodeToString([]byte(path.Host))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Host"} + } + return allSegments, nil +} +func (request *innerSetBucketsMirrorRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, 
err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetBucketsMirrorRequest = setbucketsmirror.Request +type SetBucketsMirrorResponse = setbucketsmirror.Response + +// 设置存储空间的镜像源 +func (storage *Storage) SetBucketsMirror(ctx context.Context, request *SetBucketsMirrorRequest, options *Options) (*SetBucketsMirrorResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerSetBucketsMirrorRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "image") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if 
credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetBucketsMirrorResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_set_object_file_type.go b/storagev2/apis/api_set_object_file_type.go new file mode 100644 index 00000000..a4125727 --- /dev/null +++ b/storagev2/apis/api_set_object_file_type.go @@ -0,0 +1,115 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + setobjectfiletype "github.com/qiniu/go-sdk/v7/storagev2/apis/set_object_file_type" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "strconv" + "strings" +) + +type innerSetObjectFileTypeRequest setobjectfiletype.Request + +func (pp *innerSetObjectFileTypeRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerSetObjectFileTypeRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + allSegments = append(allSegments, "type", strconv.FormatInt(path.Type, 10)) + return allSegments, nil +} +func (request *innerSetObjectFileTypeRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + 
} else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type SetObjectFileTypeRequest = setobjectfiletype.Request +type SetObjectFileTypeResponse = setobjectfiletype.Response + +// 修改文件的存储类型信息,可以实现标准存储、低频存储和归档存储之间的互相转换 +func (storage *Storage) SetObjectFileType(ctx context.Context, request *SetObjectFileTypeRequest, options *Options) (*SetObjectFileTypeResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerSetObjectFileTypeRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "chtype") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } 
+ if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &SetObjectFileTypeResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/api_stat_object.go b/storagev2/apis/api_stat_object.go new file mode 100644 index 00000000..7d986ccf --- /dev/null +++ b/storagev2/apis/api_stat_object.go @@ -0,0 +1,127 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +package apis + +import ( + "context" + "encoding/base64" + auth "github.com/qiniu/go-sdk/v7/auth" + statobject "github.com/qiniu/go-sdk/v7/storagev2/apis/stat_object" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerStatObjectRequest statobject.Request + +func (pp *innerStatObjectRequest) getBucketName(ctx context.Context) (string, error) { + return strings.SplitN(pp.Entry, ":", 2)[0], nil +} +func (path *innerStatObjectRequest) buildPath() ([]string, error) { + var allSegments []string + if path.Entry != "" { + allSegments = append(allSegments, base64.URLEncoding.EncodeToString([]byte(path.Entry))) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Entry"} + } + return allSegments, nil +} +func (query *innerStatObjectRequest) buildQuery() (url.Values, error) { + allQuery := make(url.Values) + if query.NeedParts { + allQuery.Set("needparts", strconv.FormatBool(query.NeedParts)) + } + return allQuery, nil +} +func (request *innerStatObjectRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type StatObjectRequest = statobject.Request +type StatObjectResponse = statobject.Response + +// 仅获取对象的元信息,不返回对象的内容 +func (storage *Storage) StatObject(ctx context.Context, request *StatObjectRequest, options *Options) (*StatObjectResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerStatObjectRequest)(request) + serviceNames := []region.ServiceName{region.ServiceRs} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + 
pathSegments = append(pathSegments, "stat") + if segments, err := innerRequest.buildPath(); err != nil { + return nil, err + } else { + pathSegments = append(pathSegments, segments...) + } + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + if query, err := innerRequest.buildQuery(); err != nil { + return nil, err + } else { + rawQuery += query.Encode() + } + req := httpclient.Request{Method: "GET", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, BufferResponse: true} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + var err error + if options.OverwrittenBucketHosts != nil { + if bucketHosts, err = options.OverwrittenBucketHosts.GetEndpoints(ctx); err != nil { + return nil, err + } + } + queryOptions := region.BucketRegionsQueryOptions{UseInsecureProtocol: storage.client.UseInsecureProtocol(), HostFreezeDuration: storage.client.GetHostFreezeDuration(), Client: storage.client.GetClient()} + if hostRetryConfig := storage.client.GetHostRetryConfig(); hostRetryConfig != nil { + queryOptions.RetryMax = hostRetryConfig.RetryMax + } + if query, err = region.NewBucketRegionsQuery(bucketHosts, &queryOptions); err != nil { + return nil, err + } + } + if query != nil { + bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { 
+ return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + var respBody StatObjectResponse + if err := storage.client.DoAndAcceptJSON(ctx, &req, &respBody); err != nil { + return nil, err + } + return &respBody, nil +} diff --git a/storagev2/apis/api_update_bucket_rules.go b/storagev2/apis/api_update_bucket_rules.go new file mode 100644 index 00000000..160f5ff3 --- /dev/null +++ b/storagev2/apis/api_update_bucket_rules.go @@ -0,0 +1,118 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + "context" + auth "github.com/qiniu/go-sdk/v7/auth" + updatebucketrules "github.com/qiniu/go-sdk/v7/storagev2/apis/update_bucket_rules" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" + "net/url" + "strconv" + "strings" +) + +type innerUpdateBucketRulesRequest updatebucketrules.Request + +func (form *innerUpdateBucketRulesRequest) getBucketName(ctx context.Context) (string, error) { + return form.Bucket, nil +} +func (form *innerUpdateBucketRulesRequest) build() (url.Values, error) { + formValues := make(url.Values) + if form.Bucket != "" { + formValues.Set("bucket", form.Bucket) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Bucket"} + } + if form.Name != "" { + formValues.Set("name", form.Name) + } else { + return nil, errors.MissingRequiredFieldError{Name: "Name"} + } + if form.Prefix != "" { + formValues.Set("prefix", form.Prefix) + } + formValues.Set("delete_after_days", strconv.FormatInt(form.DeleteAfterDays, 10)) + formValues.Set("to_line_after_days", strconv.FormatInt(form.ToIaAfterDays, 10)) + formValues.Set("to_archive_after_days", strconv.FormatInt(form.ToArchiveAfterDays, 10)) + formValues.Set("to_deep_archive_after_days", 
strconv.FormatInt(form.ToDeepArchiveAfterDays, 10)) + formValues.Set("to_archive_ir_after_days", strconv.FormatInt(form.ToArchiveIrAfterDays, 10)) + return formValues, nil +} +func (request *innerUpdateBucketRulesRequest) getAccessKey(ctx context.Context) (string, error) { + if request.Credentials != nil { + if credentials, err := request.Credentials.Get(ctx); err != nil { + return "", err + } else { + return credentials.AccessKey, nil + } + } + return "", nil +} + +type UpdateBucketRulesRequest = updatebucketrules.Request +type UpdateBucketRulesResponse = updatebucketrules.Response + +// 更新空间规则 +func (storage *Storage) UpdateBucketRules(ctx context.Context, request *UpdateBucketRulesRequest, options *Options) (*UpdateBucketRulesResponse, error) { + if options == nil { + options = &Options{} + } + innerRequest := (*innerUpdateBucketRulesRequest)(request) + serviceNames := []region.ServiceName{region.ServiceBucket} + if innerRequest.Credentials == nil && storage.client.GetCredentials() == nil { + return nil, errors.MissingRequiredFieldError{Name: "Credentials"} + } + var pathSegments []string + pathSegments = append(pathSegments, "rules", "update") + path := "/" + strings.Join(pathSegments, "/") + var rawQuery string + body, err := innerRequest.build() + if err != nil { + return nil, err + } + req := httpclient.Request{Method: "POST", ServiceNames: serviceNames, Path: path, RawQuery: rawQuery, Endpoints: options.OverwrittenEndpoints, Region: options.OverwrittenRegion, AuthType: auth.TokenQiniu, Credentials: innerRequest.Credentials, RequestBody: httpclient.GetFormRequestBody(body)} + if options.OverwrittenEndpoints == nil && options.OverwrittenRegion == nil && storage.client.GetRegions() == nil { + query := storage.client.GetBucketQuery() + if query == nil { + bucketHosts := httpclient.DefaultBucketHosts() + if options.OverwrittenBucketHosts != nil { + req.Endpoints = options.OverwrittenBucketHosts + } else { + req.Endpoints = bucketHosts + } + } + if query != nil { 
+ bucketName := options.OverwrittenBucketName + var accessKey string + var err error + if bucketName == "" { + if bucketName, err = innerRequest.getBucketName(ctx); err != nil { + return nil, err + } + } + if accessKey, err = innerRequest.getAccessKey(ctx); err != nil { + return nil, err + } + if accessKey == "" { + if credentialsProvider := storage.client.GetCredentials(); credentialsProvider != nil { + if creds, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else if creds != nil { + accessKey = creds.AccessKey + } + } + } + if accessKey != "" && bucketName != "" { + req.Region = query.Query(accessKey, bucketName) + } + } + } + resp, err := storage.client.Do(ctx, &req) + if err != nil { + return nil, err + } + return &UpdateBucketRulesResponse{}, resp.Body.Close() +} diff --git a/storagev2/apis/apis.go b/storagev2/apis/apis.go new file mode 100644 index 00000000..0771b0ed --- /dev/null +++ b/storagev2/apis/apis.go @@ -0,0 +1,26 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +package apis + +import ( + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + region "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +// API 客户端 +type Storage struct { + client *httpclient.Client +} + +// 创建 API 客户端 +func NewStorage(options *httpclient.Options) *Storage { + return &Storage{client: httpclient.NewClient(options)} +} + +// API 客户端选项 +type Options struct { + OverwrittenBucketHosts region.EndpointsProvider + OverwrittenBucketName string + OverwrittenEndpoints region.EndpointsProvider + OverwrittenRegion region.RegionsProvider +} diff --git a/storagev2/apis/async_fetch_object/api.go b/storagev2/apis/async_fetch_object/api.go new file mode 100644 index 00000000..95db2fba --- /dev/null +++ b/storagev2/apis/async_fetch_object/api.go @@ -0,0 +1,117 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 从指定 URL 抓取资源,并将该资源存储到指定空间中。每次只抓取一个文件,抓取时可以指定保存空间名和最终资源名 +package async_fetch_object + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Url string // 需要抓取的 URL,支持设置多个用于高可用,以’;'分隔,当指定多个 URL 时可以在前一个 URL 抓取失败时重试下一个 + Bucket string // 所在区域的存储空间 + Host string // 从指定 URL 下载数据时使用的 Host + Key string // 对象名称,如果不传,则默认为文件的哈希值 + Md5 string // 文件 MD5,传入以后会在存入存储时对文件做校验,校验失败则不存入指定空间 + Etag string // 对象内容的 ETag,传入以后会在存入存储时对文件做校验,校验失败则不存入指定空间 + CallbackUrl string // 回调 URL + CallbackBody string // 回调负荷,如果 callback_url 不为空则必须指定 + CallbackBodyType string // 回调负荷内容类型,默认为 "application/x-www-form-urlencoded" + CallbackHost string // 回调时使用的 Host + FileType int64 // 存储文件类型 `0`: 标准存储(默认),`1`: 低频存储,`2`: 归档存储 + IgnoreSameKey bool // 如果空间中已经存在同名文件则放弃本次抓取(仅对比对象名称,不校验文件内容) +} + +// 要抓取的资源信息 +type NewFetchTaskParams = Request +type jsonRequest struct { + Url string `json:"url"` // 需要抓取的 URL,支持设置多个用于高可用,以’;'分隔,当指定多个 URL 时可以在前一个 URL 抓取失败时重试下一个 + Bucket string `json:"bucket"` // 所在区域的存储空间 + Host string `json:"host,omitempty"` // 从指定 URL 下载数据时使用的 Host + Key string `json:"key,omitempty"` // 对象名称,如果不传,则默认为文件的哈希值 + Md5 string `json:"md5,omitempty"` // 文件 MD5,传入以后会在存入存储时对文件做校验,校验失败则不存入指定空间 + Etag string `json:"etag,omitempty"` // 对象内容的 ETag,传入以后会在存入存储时对文件做校验,校验失败则不存入指定空间 + CallbackUrl string `json:"callbackurl,omitempty"` // 回调 URL + CallbackBody string `json:"callbackbody,omitempty"` // 回调负荷,如果 callback_url 不为空则必须指定 + CallbackBodyType string `json:"callbackbodytype,omitempty"` // 回调负荷内容类型,默认为 "application/x-www-form-urlencoded" + CallbackHost string `json:"callbackhost,omitempty"` // 回调时使用的 Host + FileType int64 `json:"file_type"` // 存储文件类型 `0`: 标准存储(默认),`1`: 低频存储,`2`: 归档存储 + IgnoreSameKey bool `json:"ignore_same_key,omitempty"` // 
如果空间中已经存在同名文件则放弃本次抓取(仅对比对象名称,不校验文件内容) +} + +func (j *Request) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonRequest{Url: j.Url, Bucket: j.Bucket, Host: j.Host, Key: j.Key, Md5: j.Md5, Etag: j.Etag, CallbackUrl: j.CallbackUrl, CallbackBody: j.CallbackBody, CallbackBodyType: j.CallbackBodyType, CallbackHost: j.CallbackHost, FileType: j.FileType, IgnoreSameKey: j.IgnoreSameKey}) +} +func (j *Request) UnmarshalJSON(data []byte) error { + var nj jsonRequest + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Url = nj.Url + j.Bucket = nj.Bucket + j.Host = nj.Host + j.Key = nj.Key + j.Md5 = nj.Md5 + j.Etag = nj.Etag + j.CallbackUrl = nj.CallbackUrl + j.CallbackBody = nj.CallbackBody + j.CallbackBodyType = nj.CallbackBodyType + j.CallbackHost = nj.CallbackHost + j.FileType = nj.FileType + j.IgnoreSameKey = nj.IgnoreSameKey + return nil +} +func (j *Request) validate() error { + if j.Url == "" { + return errors.MissingRequiredFieldError{Name: "Url"} + } + if j.Bucket == "" { + return errors.MissingRequiredFieldError{Name: "Bucket"} + } + return nil +} + +// 获取 API 所用的响应 +type Response struct { + Id string // 异步任务 ID + QueuedTasksCount int64 // 当前任务前面的排队任务数量,`0` 表示当前任务正在进行,`-1` 表示任务已经至少被处理过一次(可能会进入重试逻辑) +} + +// 返回的异步任务信息 +type NewFetchTaskInfo = Response +type jsonResponse struct { + Id string `json:"id"` // 异步任务 ID + QueuedTasksCount int64 `json:"wait"` // 当前任务前面的排队任务数量,`0` 表示当前任务正在进行,`-1` 表示任务已经至少被处理过一次(可能会进入重试逻辑) +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Id: j.Id, QueuedTasksCount: j.QueuedTasksCount}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Id = nj.Id + j.QueuedTasksCount = nj.QueuedTasksCount + return nil +} +func (j *Response) validate() error { + if 
j.Id == "" {
+		return errors.MissingRequiredFieldError{Name: "Id"}
+	}
+	// NOTE(review): QueuedTasksCount == 0 is a documented, valid value ("task
+	// currently in progress"; -1 means already processed), so zero must not be
+	// rejected as a missing field. Real fix belongs in api-generator's template.
+	return nil
+}
diff --git a/storagev2/apis/batch_ops/api.go b/storagev2/apis/batch_ops/api.go
new file mode 100644
index 00000000..22950731
--- /dev/null
+++ b/storagev2/apis/batch_ops/api.go
@@ -0,0 +1,139 @@
+// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY!
+
+// 批量操作意指在单一请求中执行多次(最大限制1000次) 查询元信息、修改元信息、移动、复制、删除、修改状态、修改存储类型、修改生命周期和解冻操作,极大提高对象管理效率。其中,解冻操作仅针对归档存储文件有效
+package batch_ops
+
+import (
+	"encoding/json"
+	credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials"
+	errors "github.com/qiniu/go-sdk/v7/storagev2/errors"
+)
+
+// 调用 API 所用的请求
+type Request struct {
+	Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider
+	Operations []string // 单一对象管理指令
+}
+
+// 获取 API 所用的响应
+type Response struct {
+	OperationResponses OperationResponses // 所有管理指令的响应信息
+}
+
+// 响应数据
+type Data struct {
+	Error string // 管理指令的错误信息,仅在发生错误时才返回
+	Size int64 // 对象大小,单位为字节,仅对 stat 指令才有效
+	Hash string // 对象哈希值,仅对 stat 指令才有效
+	MimeType string // 对象 MIME 类型,仅对 stat 指令才有效
+	Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储,仅对 stat 指令才有效
+	PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒,仅对 stat 指令才有效
+	EndUser string // 资源内容的唯一属主标识
+	RestoringStatus int64 // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效
+	Status int64 // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段,仅对 stat 指令才有效
+	Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回,仅对 stat 指令才有效
+	ExpirationTime int64 // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段,仅对 stat 指令才有效
+	TransitionToIaTime int64 // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段,仅对 stat 指令才有效
+	TransitionToArchiveTime int64 // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效
+	TransitionToDeepArchiveTime int64 // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效
+	TransitionToArchiveIrTime
int64 // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段,仅对 stat 指令才有效 +} + +// 管理指令的响应数据 +type OperationResponseData = Data +type jsonData struct { + Error string `json:"error,omitempty"` // 管理指令的错误信息,仅在发生错误时才返回 + Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节,仅对 stat 指令才有效 + Hash string `json:"hash,omitempty"` // 对象哈希值,仅对 stat 指令才有效 + MimeType string `json:"mimeType,omitempty"` // 对象 MIME 类型,仅对 stat 指令才有效 + Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储,仅对 stat 指令才有效 + PutTime int64 `json:"putTime,omitempty"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒,仅对 stat 指令才有效 + EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 + RestoringStatus int64 `json:"restoreStatus,omitempty"` // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段,仅对 stat 指令才有效 + Status int64 `json:"status,omitempty"` // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段,仅对 stat 指令才有效 + Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回,仅对 stat 指令才有效 + ExpirationTime int64 `json:"expiration,omitempty"` // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段,仅对 stat 指令才有效 + TransitionToIaTime int64 `json:"transitionToIA,omitempty"` // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段,仅对 stat 指令才有效 + TransitionToArchiveTime int64 `json:"transitionToARCHIVE,omitempty"` // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 + TransitionToDeepArchiveTime int64 `json:"transitionToDeepArchive,omitempty"` // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段,仅对 stat 指令才有效 + TransitionToArchiveIrTime int64 `json:"transitionToArchiveIR,omitempty"` // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段,仅对 stat 指令才有效 +} + +func (j *Data) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonData{Error: j.Error, Size: j.Size, Hash: j.Hash, MimeType: j.MimeType, Type: j.Type, PutTime: j.PutTime, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Status: j.Status, Md5: j.Md5, ExpirationTime: j.ExpirationTime, TransitionToIaTime: 
j.TransitionToIaTime, TransitionToArchiveTime: j.TransitionToArchiveTime, TransitionToDeepArchiveTime: j.TransitionToDeepArchiveTime, TransitionToArchiveIrTime: j.TransitionToArchiveIrTime}) +} +func (j *Data) UnmarshalJSON(data []byte) error { + var nj jsonData + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Error = nj.Error + j.Size = nj.Size + j.Hash = nj.Hash + j.MimeType = nj.MimeType + j.Type = nj.Type + j.PutTime = nj.PutTime + j.EndUser = nj.EndUser + j.RestoringStatus = nj.RestoringStatus + j.Status = nj.Status + j.Md5 = nj.Md5 + j.ExpirationTime = nj.ExpirationTime + j.TransitionToIaTime = nj.TransitionToIaTime + j.TransitionToArchiveTime = nj.TransitionToArchiveTime + j.TransitionToDeepArchiveTime = nj.TransitionToDeepArchiveTime + j.TransitionToArchiveIrTime = nj.TransitionToArchiveIrTime + return nil +} +func (j *Data) validate() error { + return nil +} + +// 每个管理指令的响应信息 +type OperationResponse struct { + Code int64 // 响应状态码 + Data OperationResponseData // 响应数据 +} +type jsonOperationResponse struct { + Code int64 `json:"code"` // 响应状态码 + Data OperationResponseData `json:"data,omitempty"` // 响应数据 +} + +func (j *OperationResponse) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonOperationResponse{Code: j.Code, Data: j.Data}) +} +func (j *OperationResponse) UnmarshalJSON(data []byte) error { + var nj jsonOperationResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Code = nj.Code + j.Data = nj.Data + return nil +} +func (j *OperationResponse) validate() error { + if j.Code == 0 { + return errors.MissingRequiredFieldError{Name: "Code"} + } + return nil +} + +// 所有管理指令的响应信息 +type OperationResponses []OperationResponse + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.OperationResponses) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var array OperationResponses + if err := json.Unmarshal(data, 
&array); err != nil { + return err + } + j.OperationResponses = array + return nil +} diff --git a/storagev2/apis/copy_object/api.go b/storagev2/apis/copy_object/api.go new file mode 100644 index 00000000..091720a8 --- /dev/null +++ b/storagev2/apis/copy_object/api.go @@ -0,0 +1,17 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 将源空间的指定对象复制到目标空间 +package copy_object + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + SrcEntry string // 指定源对象空间与源对象名称,格式为 <源对象空间>:<源对象名称> + DestEntry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + IsForce bool // 如果目标对象名已被占用,则返回错误码 614,且不做任何覆盖操作;如果指定为 true,会强制覆盖目标对象 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/create_bucket/api.go b/storagev2/apis/create_bucket/api.go new file mode 100644 index 00000000..377e99ce --- /dev/null +++ b/storagev2/apis/create_bucket/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 创建一个新的存储空间 +package create_bucket + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 空间名称,要求在对象存储系统范围内唯一,由 3~63 个字符组成,支持小写字母、短划线-和数字,且必须以小写字母或数字开头和结尾 + Region string // 存储区域 ID,默认 z0 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/delete_bucket/api.go b/storagev2/apis/delete_bucket/api.go new file mode 100644 index 00000000..f9c294c7 --- /dev/null +++ b/storagev2/apis/delete_bucket/api.go @@ -0,0 +1,15 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 删除指定的存储空间 +package delete_bucket + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 需要删除的目标空间名 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/delete_bucket_rules/api.go b/storagev2/apis/delete_bucket_rules/api.go new file mode 100644 index 00000000..7e67f7fd --- /dev/null +++ b/storagev2/apis/delete_bucket_rules/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 删除空间规则 +package delete_bucket_rules + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Bucket string // 空间名称 + Name string // 要删除的规则名称 +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/delete_bucket_taggings/api.go b/storagev2/apis/delete_bucket_taggings/api.go new file mode 100644 index 00000000..eed8ea3b --- /dev/null +++ b/storagev2/apis/delete_bucket_taggings/api.go @@ -0,0 +1,15 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 一键删除指定存储空间的所有标签 +package delete_bucket_taggings + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/delete_object/api.go b/storagev2/apis/delete_object/api.go new file mode 100644 index 00000000..5e6dc042 --- /dev/null +++ b/storagev2/apis/delete_object/api.go @@ -0,0 +1,15 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 删除指定对象 +package delete_object + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/delete_object_after_days/api.go b/storagev2/apis/delete_object_after_days/api.go new file mode 100644 index 00000000..fcf88579 --- /dev/null +++ b/storagev2/apis/delete_object_after_days/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 更新文件生命周期 +package delete_object_after_days + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + DeleteAfterDays int64 // 指定文件上传后在设置的 DeleteAfterDays 过期删除,删除后不可恢复,设置为 0 表示取消已设置的过期删除的生命周期规则 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/fetch_object/api.go b/storagev2/apis/fetch_object/api.go new file mode 100644 index 00000000..7313c272 --- /dev/null +++ b/storagev2/apis/fetch_object/api.go @@ -0,0 +1,65 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 从指定 URL 抓取指定名称的对象并存储到该空间中 +package fetch_object + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + FromUrl string // 指定抓取的 URL + ToEntry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + Host string // 指定抓取 URL 请求用的 HOST 参数 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Hash string // 抓取的对象内容的 Etag 值 + ObjectName string // 抓取后保存的对象名称 + Size int64 // 对象大小,单位为字节 + MimeType string // 对象 MIME 类型 +} + +// 抓取到的文件元信息 +type FetchedObjectMetadata = Response +type jsonResponse struct { + Hash string `json:"hash"` // 抓取的对象内容的 Etag 值 + ObjectName string `json:"key"` // 抓取后保存的对象名称 + Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节 + MimeType string `json:"mimeType"` // 对象 MIME 类型 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Hash: j.Hash, ObjectName: j.ObjectName, Size: j.Size, MimeType: j.MimeType}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Hash = nj.Hash + j.ObjectName = nj.ObjectName + j.Size = nj.Size + j.MimeType = nj.MimeType + return nil +} +func (j *Response) validate() error { + if j.Hash == "" { + return errors.MissingRequiredFieldError{Name: "Hash"} + } + if j.ObjectName == "" { + return errors.MissingRequiredFieldError{Name: "ObjectName"} + } + if j.MimeType == "" { + return errors.MissingRequiredFieldError{Name: "MimeType"} + } + return nil +} diff --git a/storagev2/apis/get_async_fetch_task/api.go b/storagev2/apis/get_async_fetch_task/api.go new file mode 100644 index 00000000..a18e1e4c --- /dev/null +++ b/storagev2/apis/get_async_fetch_task/api.go @@ -0,0 +1,54 @@ +// THIS FILE IS 
GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 查询异步抓取任务 +package get_async_fetch_task + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Id string // 异步任务 ID + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Id string // 异步任务 ID + QueuedTasksCount int64 // 当前任务前面的排队任务数量,`0` 表示当前任务正在进行,`-1` 表示任务已经至少被处理过一次(可能会进入重试逻辑) +} + +// 返回的异步任务信息 +type FetchTaskInfo = Response +type jsonResponse struct { + Id string `json:"id"` // 异步任务 ID + QueuedTasksCount int64 `json:"wait"` // 当前任务前面的排队任务数量,`0` 表示当前任务正在进行,`-1` 表示任务已经至少被处理过一次(可能会进入重试逻辑) +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Id: j.Id, QueuedTasksCount: j.QueuedTasksCount}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Id = nj.Id + j.QueuedTasksCount = nj.QueuedTasksCount + return nil +} +func (j *Response) validate() error { + if j.Id == "" { + return errors.MissingRequiredFieldError{Name: "Id"} + } + if j.QueuedTasksCount == 0 { + return errors.MissingRequiredFieldError{Name: "QueuedTasksCount"} + } + return nil +} diff --git a/storagev2/apis/get_bucket_cors_rules/api.go b/storagev2/apis/get_bucket_cors_rules/api.go new file mode 100644 index 00000000..be59a456 --- /dev/null +++ b/storagev2/apis/get_bucket_cors_rules/api.go @@ -0,0 +1,92 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取空间的跨域规则 +package get_bucket_cors_rules + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 指定空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + CORSRules CORSRules // 跨域规则列表 +} + +// 允许的域名列表 +type AllowedOriginHosts = []string + +// 允许的方法列表 +type AllowedMethods = []string + +// 允许的 Header 列表 +type AllowedHeaders = []string + +// 暴露的 Header 列表 +type ExposedHeaders = []string + +// 跨域规则 +type CORSRule struct { + AllowedOrigin AllowedOriginHosts // 允许的域名。必填;支持通配符 * ;*表示全部匹配;只有第一个 * 生效;需要设置 "Scheme";大小写敏感 + AllowedMethod AllowedMethods // 允许的方法。必填;不支持通配符;大小写不敏感; + AllowedHeader AllowedHeaders + ExposedHeader ExposedHeaders // 选填;不支持通配符;X-Log, X-Reqid 是默认会暴露的两个 header;其他的 header 如果没有设置,则不会暴露;大小写不敏感; + MaxAge int64 // 结果可以缓存的时间。选填;空则不缓存 +} +type jsonCORSRule struct { + AllowedOrigin AllowedOriginHosts `json:"allowed_origin"` // 允许的域名。必填;支持通配符 * ;*表示全部匹配;只有第一个 * 生效;需要设置 "Scheme";大小写敏感 + AllowedMethod AllowedMethods `json:"allowed_method"` // 允许的方法。必填;不支持通配符;大小写不敏感; + AllowedHeader AllowedHeaders `json:"allowed_header,omitempty"` + ExposedHeader ExposedHeaders `json:"exposed_header,omitempty"` // 选填;不支持通配符;X-Log, X-Reqid 是默认会暴露的两个 header;其他的 header 如果没有设置,则不会暴露;大小写不敏感; + MaxAge int64 `json:"max_age,omitempty"` // 结果可以缓存的时间。选填;空则不缓存 +} + +func (j *CORSRule) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonCORSRule{AllowedOrigin: j.AllowedOrigin, AllowedMethod: j.AllowedMethod, AllowedHeader: j.AllowedHeader, ExposedHeader: j.ExposedHeader, MaxAge: j.MaxAge}) +} +func (j *CORSRule) UnmarshalJSON(data []byte) error { + var nj jsonCORSRule + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.AllowedOrigin = 
nj.AllowedOrigin + j.AllowedMethod = nj.AllowedMethod + j.AllowedHeader = nj.AllowedHeader + j.ExposedHeader = nj.ExposedHeader + j.MaxAge = nj.MaxAge + return nil +} +func (j *CORSRule) validate() error { + if len(j.AllowedOrigin) == 0 { + return errors.MissingRequiredFieldError{Name: "AllowedOrigin"} + } + if len(j.AllowedMethod) == 0 { + return errors.MissingRequiredFieldError{Name: "AllowedMethod"} + } + return nil +} + +// 跨域规则列表 +type CORSRules []CORSRule + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.CORSRules) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var array CORSRules + if err := json.Unmarshal(data, &array); err != nil { + return err + } + j.CORSRules = array + return nil +} diff --git a/storagev2/apis/get_bucket_domains/api.go b/storagev2/apis/get_bucket_domains/api.go new file mode 100644 index 00000000..93909ae3 --- /dev/null +++ b/storagev2/apis/get_bucket_domains/api.go @@ -0,0 +1,35 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取存储空间的域名列表 +package get_bucket_domains + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" +) + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 要获取域名列表的目标空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Domains Domains // 存储空间的域名列表 +} + +// 存储空间的域名列表 +type Domains []string + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.Domains) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var array Domains + if err := json.Unmarshal(data, &array); err != nil { + return err + } + j.Domains = array + return nil +} diff --git a/storagev2/apis/get_bucket_quota/api.go b/storagev2/apis/get_bucket_quota/api.go new file mode 100644 index 00000000..3964cb5c --- /dev/null +++ b/storagev2/apis/get_bucket_quota/api.go @@ -0,0 +1,44 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取用户存储空间配额限制 +package get_bucket_quota + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 指定存储空间 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Size int64 // 空间存储量配额 + Count int64 // 空间文件数配额 +} +type jsonResponse struct { + Size int64 `json:"size,omitempty"` // 空间存储量配额 + Count int64 `json:"count,omitempty"` // 空间文件数配额 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Size: j.Size, Count: j.Count}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Size = nj.Size + j.Count = nj.Count + return nil +} +func (j *Response) validate() error { + return nil +} diff --git a/storagev2/apis/get_bucket_rules/api.go b/storagev2/apis/get_bucket_rules/api.go new file mode 100644 index 00000000..9c1a9f97 --- /dev/null +++ b/storagev2/apis/get_bucket_rules/api.go @@ -0,0 +1,92 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取空间规则 +package get_bucket_rules + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + BucketRules BucketRules // 空间规则列表 +} + +// 空间规则 +type BucketRule struct { + Name string // 空间规则名称 + Prefix string // 匹配的对象名称前缀 + DeleteAfterDays int64 // 上传文件多少天后删除 + ToIaAfterDays int64 // 文件上传多少天后转低频存储 + ToArchiveAfterDays int64 // 文件上传多少天后转归档存储 + ToDeepArchiveAfterDays int64 // 文件上传多少天后转深度归档存储 + ToArchiveIrAfterDays int64 // 文件上传多少天后转归档直读存储 + CreatedTime string // 规则创建时间 +} +type jsonBucketRule struct { + Name string `json:"name"` // 空间规则名称 + Prefix string `json:"prefix"` // 匹配的对象名称前缀 + DeleteAfterDays int64 `json:"delete_after_days,omitempty"` // 上传文件多少天后删除 + ToIaAfterDays int64 `json:"to_line_after_days,omitempty"` // 文件上传多少天后转低频存储 + ToArchiveAfterDays int64 `json:"to_archive_after_days,omitempty"` // 文件上传多少天后转归档存储 + ToDeepArchiveAfterDays int64 `json:"to_deep_archive_after_days,omitempty"` // 文件上传多少天后转深度归档存储 + ToArchiveIrAfterDays int64 `json:"to_archive_ir_after_days,omitempty"` // 文件上传多少天后转归档直读存储 + CreatedTime string `json:"ctime"` // 规则创建时间 +} + +func (j *BucketRule) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonBucketRule{Name: j.Name, Prefix: j.Prefix, DeleteAfterDays: j.DeleteAfterDays, ToIaAfterDays: j.ToIaAfterDays, ToArchiveAfterDays: j.ToArchiveAfterDays, ToDeepArchiveAfterDays: j.ToDeepArchiveAfterDays, ToArchiveIrAfterDays: j.ToArchiveIrAfterDays, CreatedTime: j.CreatedTime}) +} +func (j *BucketRule) UnmarshalJSON(data []byte) error { + var nj jsonBucketRule + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Name = nj.Name + j.Prefix = 
nj.Prefix + j.DeleteAfterDays = nj.DeleteAfterDays + j.ToIaAfterDays = nj.ToIaAfterDays + j.ToArchiveAfterDays = nj.ToArchiveAfterDays + j.ToDeepArchiveAfterDays = nj.ToDeepArchiveAfterDays + j.ToArchiveIrAfterDays = nj.ToArchiveIrAfterDays + j.CreatedTime = nj.CreatedTime + return nil +} +func (j *BucketRule) validate() error { + if j.Name == "" { + return errors.MissingRequiredFieldError{Name: "Name"} + } + if j.Prefix == "" { + return errors.MissingRequiredFieldError{Name: "Prefix"} + } + if j.CreatedTime == "" { + return errors.MissingRequiredFieldError{Name: "CreatedTime"} + } + return nil +} + +// 空间规则列表 +type BucketRules []BucketRule + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.BucketRules) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var array BucketRules + if err := json.Unmarshal(data, &array); err != nil { + return err + } + j.BucketRules = array + return nil +} diff --git a/storagev2/apis/get_bucket_taggings/api.go b/storagev2/apis/get_bucket_taggings/api.go new file mode 100644 index 00000000..f6222300 --- /dev/null +++ b/storagev2/apis/get_bucket_taggings/api.go @@ -0,0 +1,91 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 查询指定的存储空间已设置的标签信息 +package get_bucket_taggings + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Tags Tags // 标签列表 +} + +// 标签键值对 +type TagInfo struct { + Key string // 标签名称,最大 64 Byte,不能为空且大小写敏感,不能以 kodo 为前缀(预留), 不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . _ : / @ + Value string // 标签值,最大 128 Byte,不能为空且大小写敏感,不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . 
_ : / @ +} +type jsonTagInfo struct { + Key string `json:"Key"` // 标签名称,最大 64 Byte,不能为空且大小写敏感,不能以 kodo 为前缀(预留), 不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . _ : / @ + Value string `json:"Value"` // 标签值,最大 128 Byte,不能为空且大小写敏感,不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . _ : / @ +} + +func (j *TagInfo) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonTagInfo{Key: j.Key, Value: j.Value}) +} +func (j *TagInfo) UnmarshalJSON(data []byte) error { + var nj jsonTagInfo + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Key = nj.Key + j.Value = nj.Value + return nil +} +func (j *TagInfo) validate() error { + if j.Key == "" { + return errors.MissingRequiredFieldError{Name: "Key"} + } + if j.Value == "" { + return errors.MissingRequiredFieldError{Name: "Value"} + } + return nil +} + +// 标签列表 +type Tags = []TagInfo + +// 存储空间标签信息 +type TagsInfo = Response +type jsonResponse struct { + Tags Tags `json:"Tags"` // 标签列表 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Tags: j.Tags}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Tags = nj.Tags + return nil +} +func (j *Response) validate() error { + if len(j.Tags) == 0 { + return errors.MissingRequiredFieldError{Name: "Tags"} + } + for _, value := range j.Tags { + if err := value.validate(); err != nil { + return err + } + } + return nil +} diff --git a/storagev2/apis/get_buckets/api.go b/storagev2/apis/get_buckets/api.go new file mode 100644 index 00000000..7cc24d41 --- /dev/null +++ b/storagev2/apis/get_buckets/api.go @@ -0,0 +1,35 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取拥有的所有存储空间列表 +package get_buckets + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" +) + +// 调用 API 所用的请求 +type Request struct { + Shared string // 包含共享存储空间,如果为 "rd" 则包含具有读权限空间,如果为 "rw" 则包含读写权限空间 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + BucketNames BucketNames // 存储空间列表 +} + +// 存储空间列表 +type BucketNames []string + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.BucketNames) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var array BucketNames + if err := json.Unmarshal(data, &array); err != nil { + return err + } + j.BucketNames = array + return nil +} diff --git a/storagev2/apis/get_buckets_v4/api.go b/storagev2/apis/get_buckets_v4/api.go new file mode 100644 index 00000000..e0244e2e --- /dev/null +++ b/storagev2/apis/get_buckets_v4/api.go @@ -0,0 +1,111 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取拥有的所有存储空间列表 +package get_buckets_v4 + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Region string // 区域 ID + Limit int64 // 分页大小。默认20,取值范围 1~100。 + Marker string // 列举开始的空间标识 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + NextMarker string // 下一页开始的空间标识 + IsTruncated bool // 是否所有的结果都已经返回 + Buckets BucketsV4 +} + +// 存储空间信息 +type BucketV4 struct { + Name string // 空间名称 + Region string // 空间区域 ID + Private bool // 空间是否私有 + CreatedTime string // 空间创建时间 +} +type jsonBucketV4 struct { + Name string `json:"name"` // 空间名称 + Region string `json:"region"` // 空间区域 ID + Private bool `json:"private"` // 空间是否私有 + CreatedTime string `json:"ctime"` // 空间创建时间 +} + +func (j *BucketV4) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonBucketV4{Name: j.Name, Region: j.Region, Private: j.Private, CreatedTime: j.CreatedTime}) +} +func (j *BucketV4) UnmarshalJSON(data []byte) error { + var nj jsonBucketV4 + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Name = nj.Name + j.Region = nj.Region + j.Private = nj.Private + j.CreatedTime = nj.CreatedTime + return nil +} +func (j *BucketV4) validate() error { + if j.Name == "" { + return errors.MissingRequiredFieldError{Name: "Name"} + } + if j.Region == "" { + return errors.MissingRequiredFieldError{Name: "Region"} + } + if j.CreatedTime == "" { + return errors.MissingRequiredFieldError{Name: "CreatedTime"} + } + return nil +} + +// 存储空间列表 +type BucketsV4 = []BucketV4 + +// 返回所有存储空间结果 +type BucketsResultV4 = Response +type jsonResponse struct { + NextMarker string `json:"nextMarker"` // 下一页开始的空间标识 + IsTruncated bool `json:"isTruncated"` // 是否所有的结果都已经返回 + Buckets BucketsV4 
`json:"buckets"` +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{NextMarker: j.NextMarker, IsTruncated: j.IsTruncated, Buckets: j.Buckets}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.NextMarker = nj.NextMarker + j.IsTruncated = nj.IsTruncated + j.Buckets = nj.Buckets + return nil +} +func (j *Response) validate() error { + if j.NextMarker == "" { + return errors.MissingRequiredFieldError{Name: "NextMarker"} + } + if len(j.Buckets) == 0 { + return errors.MissingRequiredFieldError{Name: "Buckets"} + } + for _, value := range j.Buckets { + if err := value.validate(); err != nil { + return err + } + } + return nil +} diff --git a/storagev2/apis/get_objects/api.go b/storagev2/apis/get_objects/api.go new file mode 100644 index 00000000..35224da3 --- /dev/null +++ b/storagev2/apis/get_objects/api.go @@ -0,0 +1,138 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 列举指定存储空间里的所有对象条目 +package get_objects + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 指定存储空间 + Marker string // 上一次列举返回的位置标记,作为本次列举的起点信息 + Limit int64 // 本次列举的条目数,范围为 1-1000 + Prefix string // 指定前缀,只有资源名匹配该前缀的资源会被列出 + Delimiter string // 指定目录分隔符,列出所有公共前缀(模拟列出目录效果) + NeedParts bool // 如果文件是通过分片上传的,是否返回对应的分片信息 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Marker string // 有剩余条目则返回非空字符串,作为下一次列举的参数传入,如果没有剩余条目则返回空字符串 + CommonPrefixes CommonPrefixes // 公共前缀的数组,如没有指定 delimiter 参数则不返回 + Items ListedObjects // 条目的数组,不能用来判断是否还有剩余条目 +} + +// 公共前缀的数组 +type CommonPrefixes = []string + +// 每个分片的大小 +type PartSizes = []int64 + +// 对象条目,包含对象的元信息 +type ListedObjectEntry struct { + Key string // 对象名称 + PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 + Hash string // 文件的哈希值 + Size int64 // 对象大小,单位为字节 + MimeType string // 对象 MIME 类型 + Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + EndUser string // 资源内容的唯一属主标识 + RestoringStatus int64 // 文件的存储状态,即禁用状态和启用状态间的的互相转换,`0` 表示启用,`1`表示禁用 + Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 + Parts PartSizes // 每个分片的大小,如没有指定 need_parts 参数则不返回 +} +type jsonListedObjectEntry struct { + Key string `json:"key"` // 对象名称 + PutTime int64 `json:"putTime"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 + Hash string `json:"hash"` // 文件的哈希值 + Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节 + MimeType string `json:"mimeType"` // 对象 MIME 类型 + Type int64 `json:"type,omitempty"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 + RestoringStatus int64 `json:"status,omitempty"` // 文件的存储状态,即禁用状态和启用状态间的的互相转换,`0` 表示启用,`1`表示禁用 + Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 + Parts PartSizes 
`json:"parts,omitempty"` // 每个分片的大小,如没有指定 need_parts 参数则不返回 +} + +func (j *ListedObjectEntry) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonListedObjectEntry{Key: j.Key, PutTime: j.PutTime, Hash: j.Hash, Size: j.Size, MimeType: j.MimeType, Type: j.Type, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Md5: j.Md5, Parts: j.Parts}) +} +func (j *ListedObjectEntry) UnmarshalJSON(data []byte) error { + var nj jsonListedObjectEntry + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Key = nj.Key + j.PutTime = nj.PutTime + j.Hash = nj.Hash + j.Size = nj.Size + j.MimeType = nj.MimeType + j.Type = nj.Type + j.EndUser = nj.EndUser + j.RestoringStatus = nj.RestoringStatus + j.Md5 = nj.Md5 + j.Parts = nj.Parts + return nil +} +func (j *ListedObjectEntry) validate() error { + if j.Key == "" { + return errors.MissingRequiredFieldError{Name: "Key"} + } + if j.PutTime == 0 { + return errors.MissingRequiredFieldError{Name: "PutTime"} + } + if j.Hash == "" { + return errors.MissingRequiredFieldError{Name: "Hash"} + } + if j.MimeType == "" { + return errors.MissingRequiredFieldError{Name: "MimeType"} + } + return nil +} + +// 条目的数组,不能用来判断是否还有剩余条目 +type ListedObjects = []ListedObjectEntry + +// 本次列举的对象条目信息 +type ListedObjectEntries = Response +type jsonResponse struct { + Marker string `json:"marker,omitempty"` // 有剩余条目则返回非空字符串,作为下一次列举的参数传入,如果没有剩余条目则返回空字符串 + CommonPrefixes CommonPrefixes `json:"commonPrefixes,omitempty"` // 公共前缀的数组,如没有指定 delimiter 参数则不返回 + Items ListedObjects `json:"items"` // 条目的数组,不能用来判断是否还有剩余条目 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Marker: j.Marker, CommonPrefixes: j.CommonPrefixes, Items: j.Items}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + 
j.Marker = nj.Marker + j.CommonPrefixes = nj.CommonPrefixes + j.Items = nj.Items + return nil +} +func (j *Response) validate() error { + if len(j.Items) == 0 { + return errors.MissingRequiredFieldError{Name: "Items"} + } + for _, value := range j.Items { + if err := value.validate(); err != nil { + return err + } + } + return nil +} diff --git a/storagev2/apis/get_objects_v2/api.go b/storagev2/apis/get_objects_v2/api.go new file mode 100644 index 00000000..39542ca0 --- /dev/null +++ b/storagev2/apis/get_objects_v2/api.go @@ -0,0 +1,25 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 列举指定存储空间里的所有对象条目 +package get_objects_v2 + +import ( + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "io" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 指定存储空间 + Marker string // 上一次列举返回的位置标记,作为本次列举的起点信息 + Limit int64 // 本次列举的条目数,范围为 1-1000 + Prefix string // 指定前缀,只有资源名匹配该前缀的资源会被列出 + Delimiter string // 指定目录分隔符,列出所有公共前缀(模拟列出目录效果) + NeedParts bool // 如果文件是通过分片上传的,是否返回对应的分片信息 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Body io.ReadCloser +} diff --git a/storagev2/apis/get_regions/api.go b/storagev2/apis/get_regions/api.go new file mode 100644 index 00000000..608de4d5 --- /dev/null +++ b/storagev2/apis/get_regions/api.go @@ -0,0 +1,90 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 获取所有区域信息 +package get_regions + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Regions Regions // 区域列表 +} + +// 区域信息 +type Region struct { + Id string // 区域 ID + Description string // 区域描述信息 +} +type jsonRegion struct { + Id string `json:"id"` // 区域 ID + Description string `json:"description"` // 区域描述信息 +} + +func (j *Region) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonRegion{Id: j.Id, Description: j.Description}) +} +func (j *Region) UnmarshalJSON(data []byte) error { + var nj jsonRegion + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Id = nj.Id + j.Description = nj.Description + return nil +} +func (j *Region) validate() error { + if j.Id == "" { + return errors.MissingRequiredFieldError{Name: "Id"} + } + if j.Description == "" { + return errors.MissingRequiredFieldError{Name: "Description"} + } + return nil +} + +// 区域列表 +type Regions = []Region + +// 所有区域信息 +type RegionsInfo = Response +type jsonResponse struct { + Regions Regions `json:"regions"` // 区域列表 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Regions: j.Regions}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Regions = nj.Regions + return nil +} +func (j *Response) validate() error { + if len(j.Regions) == 0 { + return errors.MissingRequiredFieldError{Name: "Regions"} + } + for _, value := range j.Regions { + if err := value.validate(); err != nil { + return err + } + } + return nil +} 
diff --git a/storagev2/apis/modify_object_life_cycle/api.go b/storagev2/apis/modify_object_life_cycle/api.go new file mode 100644 index 00000000..1f258f6a --- /dev/null +++ b/storagev2/apis/modify_object_life_cycle/api.go @@ -0,0 +1,20 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 修改已上传对象的生命周期 +package modify_object_life_cycle + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + ToIaAfterDays int64 // 指定文件上传后在设置的 ToIAAfterDays 转换到低频存储类型,设置为 -1 表示取消已设置的转低频存储的生命周期规则 + ToArchiveAfterDays int64 // 指定文件上传后在设置的 toArchiveAfterDays 转换到归档存储类型, 设置为 -1 表示取消已设置的转归档存储的生命周期规则 + ToDeepArchiveAfterDays int64 // 指定文件上传后在设置的 toDeepArchiveAfterDays 转换到深度归档存储类型, 设置为 -1 表示取消已设置的转深度归档存储的生命周期规则 + ToArchiveIrAfterDays int64 // 指定文件上传后在设置的 toArchiveIRAfterDays 转换到归档直读存储类型, 设置为 -1 表示取消已设置的转归档直读存储的生命周期规则 + DeleteAfterDays int64 // 指定文件上传后在设置的 DeleteAfterDays 过期删除,删除后不可恢复,设置为 -1 表示取消已设置的过期删除的生命周期规则 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/modify_object_metadata/api.go b/storagev2/apis/modify_object_metadata/api.go new file mode 100644 index 00000000..a9212d0e --- /dev/null +++ b/storagev2/apis/modify_object_metadata/api.go @@ -0,0 +1,18 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 修改文件元信息 +package modify_object_metadata + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + MimeType string // 新的 MIME 类型 + Condition string // 条件匹配,当前支持设置 hash、mime、fsize、putTime 条件,只有条件匹配才会执行修改操作,格式为 condKey1=condVal1&condKey2=condVal2 + MetaData map[string]string // 对象存储元信息,键可以自定义,它可以由字母、数字、下划线、减号组成,必须以 x-qn-meta- 为前缀,且长度小于等于 50,单个文件键和值总和大小不能超过 1024 字节,可以同时修改多个键 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/modify_object_status/api.go b/storagev2/apis/modify_object_status/api.go new file mode 100644 index 00000000..c88432de --- /dev/null +++ b/storagev2/apis/modify_object_status/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 修改文件的存储状态,即禁用状态和启用状态间的的互相转换 +package modify_object_status + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + Status int64 // `0` 表示启用;`1` 表示禁用 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/move_object/api.go b/storagev2/apis/move_object/api.go new file mode 100644 index 00000000..969f9428 --- /dev/null +++ b/storagev2/apis/move_object/api.go @@ -0,0 +1,17 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 将源空间的指定对象移动到目标空间,或在同一空间内对对象重命名 +package move_object + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + SrcEntry string // 指定源对象空间与源对象名称,格式为 <源对象空间>:<源对象名称> + DestEntry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + IsForce bool // 如果目标对象名已被占用,则返回错误码 614,且不做任何覆盖操作;如果指定为 true,会强制覆盖目标对象 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/post_object/api.go b/storagev2/apis/post_object/api.go new file mode 100644 index 00000000..9f869256 --- /dev/null +++ b/storagev2/apis/post_object/api.go @@ -0,0 +1,32 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 在一次 HTTP 会话中上传单一的一个文件 +package post_object + +import ( + "encoding/json" + httpclient "github.com/qiniu/go-sdk/v7/storagev2/http_client" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + ObjectName *string + UploadToken uptoken.Provider + Crc32 int64 + File httpclient.MultipartFormBinaryData + CustomData map[string]string + ResponseBody interface{} // 响应体,如果为空,则 Response.Body 的类型由 encoding/json 库决定 +} + +// 获取 API 所用的响应 +type Response struct { + Body interface{} +} + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.Body) +} +func (j *Response) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &j.Body) +} diff --git a/storagev2/apis/prefetch_object/api.go b/storagev2/apis/prefetch_object/api.go new file mode 100644 index 00000000..5ab2095c --- /dev/null +++ b/storagev2/apis/prefetch_object/api.go @@ -0,0 +1,15 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 对于设置了镜像存储的空间,从镜像源站抓取指定名称的对象并存储到该空间中,如果该空间中已存在该名称的对象,则会将镜像源站的对象覆盖空间中相同名称的对象 +package prefetch_object + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/restore_archived_object/api.go b/storagev2/apis/restore_archived_object/api.go new file mode 100644 index 00000000..c2f3ddac --- /dev/null +++ b/storagev2/apis/restore_archived_object/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 解冻归档存储类型的文件,可设置解冻有效期1~7天,完成解冻任务通常需要1~5分钟 +package restore_archived_object + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + FreezeAfterDays int64 // 解冻有效时长,取值范围 1~7 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/resumable_upload_v1_bput/api.go b/storagev2/apis/resumable_upload_v1_bput/api.go new file mode 100644 index 00000000..ebf30d72 --- /dev/null +++ b/storagev2/apis/resumable_upload_v1_bput/api.go @@ -0,0 +1,81 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 上传指定块的一片数据,具体数据量可根据现场环境调整,同一块的每片数据必须串行上传 +package resumable_upload_v1_bput + +import ( + "encoding/json" + io "github.com/qiniu/go-sdk/v7/internal/io" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + Ctx string // 前一次上传返回的块级上传控制信息 + ChunkOffset int64 // 当前片在整个块中的起始偏移 + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken + Body io.ReadSeekCloser // 请求体 +} + +// 获取 API 所用的响应 +type Response struct { + Ctx string // 本次上传成功后的块级上传控制信息,用于后续上传片(bput)及创建文件(mkfile) + Checksum string // 上传块 SHA1 值,使用 URL 安全的 Base64 编码 + Crc32 int64 // 上传块 CRC32 值,客户可通过此字段对上传块的完整性进行校验 + Offset int64 // 下一个上传块在切割块中的偏移 + Host string // 后续上传接收地址 + ExpiredAt int64 // `ctx` 过期时间 +} + +// 返回下一片数据的上传信息 +type ChunkInfo = Response +type jsonResponse struct { + Ctx string `json:"ctx"` // 本次上传成功后的块级上传控制信息,用于后续上传片(bput)及创建文件(mkfile) + Checksum string `json:"checksum"` // 上传块 SHA1 值,使用 URL 安全的 Base64 编码 + Crc32 int64 `json:"crc32"` // 上传块 CRC32 值,客户可通过此字段对上传块的完整性进行校验 + Offset int64 `json:"offset"` // 下一个上传块在切割块中的偏移 + Host string `json:"host"` // 后续上传接收地址 + ExpiredAt int64 `json:"expired_at"` // `ctx` 过期时间 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Ctx: j.Ctx, Checksum: j.Checksum, Crc32: j.Crc32, Offset: j.Offset, Host: j.Host, ExpiredAt: j.ExpiredAt}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Ctx = nj.Ctx + j.Checksum = nj.Checksum + j.Crc32 = nj.Crc32 + j.Offset = nj.Offset + j.Host = nj.Host + j.ExpiredAt = nj.ExpiredAt + return nil +} +func (j *Response) validate() error { + if j.Ctx == "" { + return errors.MissingRequiredFieldError{Name: "Ctx"} + } + if j.Checksum == "" { + return errors.MissingRequiredFieldError{Name: "Checksum"} + } + if j.Crc32 == 
0 { + return errors.MissingRequiredFieldError{Name: "Crc32"} + } + if j.Offset == 0 { + return errors.MissingRequiredFieldError{Name: "Offset"} + } + if j.Host == "" { + return errors.MissingRequiredFieldError{Name: "Host"} + } + if j.ExpiredAt == 0 { + return errors.MissingRequiredFieldError{Name: "ExpiredAt"} + } + return nil +} diff --git a/storagev2/apis/resumable_upload_v1_make_block/api.go b/storagev2/apis/resumable_upload_v1_make_block/api.go new file mode 100644 index 00000000..2beb30e8 --- /dev/null +++ b/storagev2/apis/resumable_upload_v1_make_block/api.go @@ -0,0 +1,80 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 为后续分片上传创建一个新的块,同时上传第一片数据 +package resumable_upload_v1_make_block + +import ( + "encoding/json" + io "github.com/qiniu/go-sdk/v7/internal/io" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + BlockSize int64 // 块大小,单位为字节,每块均为 4 MB,最后一块大小不超过 4 MB + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken + Body io.ReadSeekCloser // 请求体 +} + +// 获取 API 所用的响应 +type Response struct { + Ctx string // 本次上传成功后的块级上传控制信息,用于后续上传片(bput)及创建文件(mkfile) + Checksum string // 上传块 SHA1 值,使用 URL 安全的 Base64 编码 + Crc32 int64 // 上传块 CRC32 值,客户可通过此字段对上传块的完整性进行校验 + Offset int64 // 下一个上传块在切割块中的偏移 + Host string // 后续上传接收地址 + ExpiredAt int64 // `ctx` 过期时间 +} + +// 返回下一片数据的上传信息 +type NewBlockInfo = Response +type jsonResponse struct { + Ctx string `json:"ctx"` // 本次上传成功后的块级上传控制信息,用于后续上传片(bput)及创建文件(mkfile) + Checksum string `json:"checksum"` // 上传块 SHA1 值,使用 URL 安全的 Base64 编码 + Crc32 int64 `json:"crc32"` // 上传块 CRC32 值,客户可通过此字段对上传块的完整性进行校验 + Offset int64 `json:"offset"` // 下一个上传块在切割块中的偏移 + Host string `json:"host"` // 后续上传接收地址 + ExpiredAt int64 `json:"expired_at"` // `ctx` 过期时间 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return 
json.Marshal(&jsonResponse{Ctx: j.Ctx, Checksum: j.Checksum, Crc32: j.Crc32, Offset: j.Offset, Host: j.Host, ExpiredAt: j.ExpiredAt}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Ctx = nj.Ctx + j.Checksum = nj.Checksum + j.Crc32 = nj.Crc32 + j.Offset = nj.Offset + j.Host = nj.Host + j.ExpiredAt = nj.ExpiredAt + return nil +} +func (j *Response) validate() error { + if j.Ctx == "" { + return errors.MissingRequiredFieldError{Name: "Ctx"} + } + if j.Checksum == "" { + return errors.MissingRequiredFieldError{Name: "Checksum"} + } + if j.Crc32 == 0 { + return errors.MissingRequiredFieldError{Name: "Crc32"} + } + if j.Offset == 0 { + return errors.MissingRequiredFieldError{Name: "Offset"} + } + if j.Host == "" { + return errors.MissingRequiredFieldError{Name: "Host"} + } + if j.ExpiredAt == 0 { + return errors.MissingRequiredFieldError{Name: "ExpiredAt"} + } + return nil +} diff --git a/storagev2/apis/resumable_upload_v1_make_file/api.go b/storagev2/apis/resumable_upload_v1_make_file/api.go new file mode 100644 index 00000000..90f3238c --- /dev/null +++ b/storagev2/apis/resumable_upload_v1_make_file/api.go @@ -0,0 +1,34 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 将上传好的所有数据块按指定顺序合并成一个资源文件 +package resumable_upload_v1_make_file + +import ( + "encoding/json" + io "github.com/qiniu/go-sdk/v7/internal/io" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + Size int64 // 对象大小 + ObjectName *string // 对象名称 + FileName string // 文件名称,若未指定,则魔法变量中无法使用fname,ext,fprefix + MimeType string // 文件 MIME 类型,若未指定,则根据文件内容自动检测 MIME 类型 + CustomData map[string]string // 自定义元数据(需要以 `x-qn-meta-` 作为前缀)或自定义变量(需要以 `x:` 作为前缀) + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken + Body io.ReadSeekCloser // 请求体 + ResponseBody interface{} // 响应体,如果为空,则 Response.Body 的类型由 encoding/json 库决定 +} + +// 获取 API 所用的响应 +type Response struct { + Body interface{} +} + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.Body) +} +func (j *Response) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &j.Body) +} diff --git a/storagev2/apis/resumable_upload_v2_abort_multipart_upload/api.go b/storagev2/apis/resumable_upload_v2_abort_multipart_upload/api.go new file mode 100644 index 00000000..79f1f190 --- /dev/null +++ b/storagev2/apis/resumable_upload_v2_abort_multipart_upload/api.go @@ -0,0 +1,17 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 根据 UploadId 终止 Multipart Upload +package resumable_upload_v2_abort_multipart_upload + +import uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 存储空间名称 + ObjectName *string // 对象名称 + UploadId string // 在服务端申请的 Multipart Upload 任务 id + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/resumable_upload_v2_complete_multipart_upload/api.go b/storagev2/apis/resumable_upload_v2_complete_multipart_upload/api.go new file mode 100644 index 00000000..609afc8d --- /dev/null +++ b/storagev2/apis/resumable_upload_v2_complete_multipart_upload/api.go @@ -0,0 +1,114 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 在将所有数据分片都上传完成后,必须调用 completeMultipartUpload API 来完成整个文件的 Multipart Upload。用户需要提供有效数据的分片列表(包括 PartNumber 和调用 uploadPart API 服务端返回的 Etag)。服务端收到用户提交的分片列表后,会逐一验证每个数据分片的有效性。当所有的数据分片验证通过后,会把这些数据分片组合成一个完整的对象 +package resumable_upload_v2_complete_multipart_upload + +import ( + "encoding/json" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 存储空间名称 + ObjectName *string // 对象名称 + UploadId string // 在服务端申请的 Multipart Upload 任务 id + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken + Parts Parts // 已经上传的分片列表 + FileName string // 上传的原始文件名,若未指定,则魔法变量中无法使用 fname,ext,suffix + MimeType string // 若指定了则设置上传文件的 MIME 类型,若未指定,则根据文件内容自动检测 MIME 类型 + Metadata map[string]string // 用户自定义文件 metadata 信息的键值对,可以设置多个,MetaKey 和 MetaValue 都是 string,,其中 可以由字母、数字、下划线、减号组成,且长度小于等于 50,单个文件 MetaKey 和 MetaValue 总和大小不能超过 1024 字节,MetaKey 必须以 `x-qn-meta-` 作为前缀 + CustomVars map[string]string // 用户自定义变量 + ResponseBody interface{} // 响应体,如果为空,则 Response.Body 的类型由 encoding/json 库决定 +} + +// 单个分片信息 +type PartInfo struct { + PartNumber int64 // 每一个上传的分片都有一个标识它的号码 + Etag string 
// 上传块的 etag +} +type jsonPartInfo struct { + PartNumber int64 `json:"partNumber"` // 每一个上传的分片都有一个标识它的号码 + Etag string `json:"etag"` // 上传块的 etag +} + +func (j *PartInfo) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonPartInfo{PartNumber: j.PartNumber, Etag: j.Etag}) +} +func (j *PartInfo) UnmarshalJSON(data []byte) error { + var nj jsonPartInfo + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.PartNumber = nj.PartNumber + j.Etag = nj.Etag + return nil +} +func (j *PartInfo) validate() error { + if j.PartNumber == 0 { + return errors.MissingRequiredFieldError{Name: "PartNumber"} + } + if j.Etag == "" { + return errors.MissingRequiredFieldError{Name: "Etag"} + } + return nil +} + +// 分片信息列表 +type Parts = []PartInfo + +// 新上传的对象的相关信息 +type ObjectInfo = Request +type jsonRequest struct { + Parts Parts `json:"parts"` // 已经上传的分片列表 + FileName string `json:"fname,omitempty"` // 上传的原始文件名,若未指定,则魔法变量中无法使用 fname,ext,suffix + MimeType string `json:"mime_type,omitempty"` // 若指定了则设置上传文件的 MIME 类型,若未指定,则根据文件内容自动检测 MIME 类型 + Metadata map[string]string `json:"metadata,omitempty"` // 用户自定义文件 metadata 信息的键值对,可以设置多个,MetaKey 和 MetaValue 都是 string,,其中 可以由字母、数字、下划线、减号组成,且长度小于等于 50,单个文件 MetaKey 和 MetaValue 总和大小不能超过 1024 字节,MetaKey 必须以 `x-qn-meta-` 作为前缀 + CustomVars map[string]string `json:"customVars,omitempty"` // 用户自定义变量 +} + +func (j *Request) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonRequest{Parts: j.Parts, FileName: j.FileName, MimeType: j.MimeType, Metadata: j.Metadata, CustomVars: j.CustomVars}) +} +func (j *Request) UnmarshalJSON(data []byte) error { + var nj jsonRequest + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Parts = nj.Parts + j.FileName = nj.FileName + j.MimeType = nj.MimeType + j.Metadata = nj.Metadata + j.CustomVars = nj.CustomVars + return nil +} +func (j *Request) 
validate() error { + if len(j.Parts) == 0 { + return errors.MissingRequiredFieldError{Name: "Parts"} + } + for _, value := range j.Parts { + if err := value.validate(); err != nil { + return err + } + } + return nil +} + +// 获取 API 所用的响应 +type Response struct { + Body interface{} +} + +func (j *Response) MarshalJSON() ([]byte, error) { + return json.Marshal(j.Body) +} +func (j *Response) UnmarshalJSON(data []byte) error { + return json.Unmarshal(data, &j.Body) +} diff --git a/storagev2/apis/resumable_upload_v2_initiate_multipart_upload/api.go b/storagev2/apis/resumable_upload_v2_initiate_multipart_upload/api.go new file mode 100644 index 00000000..1c2959ed --- /dev/null +++ b/storagev2/apis/resumable_upload_v2_initiate_multipart_upload/api.go @@ -0,0 +1,55 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 使用 Multipart Upload 方式上传数据前,必须先调用 API 来获取一个全局唯一的 UploadId,后续的块数据通过 uploadPart API 上传,整个文件完成 completeMultipartUpload API,已经上传块的删除 abortMultipartUpload API 都依赖该 UploadId +package resumable_upload_v2_initiate_multipart_upload + +import ( + "encoding/json" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 存储空间名称 + ObjectName *string // 对象名称 + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken +} + +// 获取 API 所用的响应 +type Response struct { + UploadId string // 初始化文件生成的 id + ExpiredAt int64 // UploadId 的过期时间 UNIX 时间戳,过期之后 UploadId 不可用 +} + +// 返回本次 MultipartUpload 相关信息 +type NewMultipartUpload = Response +type jsonResponse struct { + UploadId string `json:"uploadId"` // 初始化文件生成的 id + ExpiredAt int64 `json:"expireAt"` // UploadId 的过期时间 UNIX 时间戳,过期之后 UploadId 不可用 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{UploadId: j.UploadId, ExpiredAt: j.ExpiredAt}) +} +func (j *Response) 
UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.UploadId = nj.UploadId + j.ExpiredAt = nj.ExpiredAt + return nil +} +func (j *Response) validate() error { + if j.UploadId == "" { + return errors.MissingRequiredFieldError{Name: "UploadId"} + } + if j.ExpiredAt == 0 { + return errors.MissingRequiredFieldError{Name: "ExpiredAt"} + } + return nil +} diff --git a/storagev2/apis/resumable_upload_v2_list_parts/api.go b/storagev2/apis/resumable_upload_v2_list_parts/api.go new file mode 100644 index 00000000..97266629 --- /dev/null +++ b/storagev2/apis/resumable_upload_v2_list_parts/api.go @@ -0,0 +1,122 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 列举出指定 UploadId 所属任务所有已经上传成功的分片 +package resumable_upload_v2_list_parts + +import ( + "encoding/json" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 存储空间名称 + ObjectName *string // 对象名称 + UploadId string // 在服务端申请的 Multipart Upload 任务 id + MaxParts int64 // 响应中的最大分片数目。默认值:1000,最大值:1000 + PartNumberMarker int64 // 指定列举的起始位置,只有 partNumber 值大于该参数的分片会被列出 + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken +} + +// 获取 API 所用的响应 +type Response struct { + UploadId string // 在服务端申请的 Multipart Upload 任务 id + ExpiredAt int64 // UploadId 的过期时间 UNIX 时间戳,过期之后 UploadId 不可用 + PartNumberMarker int64 // 下次继续列举的起始位置,0 表示列举结束,没有更多分片 + Parts ListedParts // 返回所有已经上传成功的分片信息 +} + +// 单个已经上传的分片信息 +type ListedPartInfo struct { + Size int64 // 分片大小 + Etag string // 分片内容的 etag + PartNumber int64 // 每一个上传的分片都有一个标识它的号码 + PutTime int64 // 分片上传时间 UNIX 时间戳 +} +type jsonListedPartInfo struct { + Size int64 `json:"size,omitempty"` // 分片大小 + Etag string `json:"etag"` // 分片内容的 etag + PartNumber int64 `json:"partNumber"` // 每一个上传的分片都有一个标识它的号码 + PutTime int64 `json:"putTime"` // 分片上传时间 UNIX 时间戳 +} + +func 
(j *ListedPartInfo) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonListedPartInfo{Size: j.Size, Etag: j.Etag, PartNumber: j.PartNumber, PutTime: j.PutTime}) +} +func (j *ListedPartInfo) UnmarshalJSON(data []byte) error { + var nj jsonListedPartInfo + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Size = nj.Size + j.Etag = nj.Etag + j.PartNumber = nj.PartNumber + j.PutTime = nj.PutTime + return nil +} +func (j *ListedPartInfo) validate() error { + if j.Etag == "" { + return errors.MissingRequiredFieldError{Name: "Etag"} + } + if j.PartNumber == 0 { + return errors.MissingRequiredFieldError{Name: "PartNumber"} + } + if j.PutTime == 0 { + return errors.MissingRequiredFieldError{Name: "PutTime"} + } + return nil +} + +// 所有已经上传的分片信息 +type ListedParts = []ListedPartInfo + +// 返回所有已经上传成功的分片信息 +type ListedPartsResponse = Response +type jsonResponse struct { + UploadId string `json:"uploadId"` // 在服务端申请的 Multipart Upload 任务 id + ExpiredAt int64 `json:"expireAt"` // UploadId 的过期时间 UNIX 时间戳,过期之后 UploadId 不可用 + PartNumberMarker int64 `json:"partNumberMarker"` // 下次继续列举的起始位置,0 表示列举结束,没有更多分片 + Parts ListedParts `json:"parts"` // 返回所有已经上传成功的分片信息 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{UploadId: j.UploadId, ExpiredAt: j.ExpiredAt, PartNumberMarker: j.PartNumberMarker, Parts: j.Parts}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.UploadId = nj.UploadId + j.ExpiredAt = nj.ExpiredAt + j.PartNumberMarker = nj.PartNumberMarker + j.Parts = nj.Parts + return nil +} +func (j *Response) validate() error { + if j.UploadId == "" { + return errors.MissingRequiredFieldError{Name: "UploadId"} + } + if j.ExpiredAt == 0 { + return errors.MissingRequiredFieldError{Name: "ExpiredAt"} 
+ } + if j.PartNumberMarker == 0 { + return errors.MissingRequiredFieldError{Name: "PartNumberMarker"} + } + if len(j.Parts) == 0 { + return errors.MissingRequiredFieldError{Name: "Parts"} + } + for _, value := range j.Parts { + if err := value.validate(); err != nil { + return err + } + } + return nil +} diff --git a/storagev2/apis/resumable_upload_v2_upload_part/api.go b/storagev2/apis/resumable_upload_v2_upload_part/api.go new file mode 100644 index 00000000..edd7702f --- /dev/null +++ b/storagev2/apis/resumable_upload_v2_upload_part/api.go @@ -0,0 +1,60 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 初始化一个 Multipart Upload 任务之后,可以根据指定的对象名称和 UploadId 来分片上传数据 +package resumable_upload_v2_upload_part + +import ( + "encoding/json" + io "github.com/qiniu/go-sdk/v7/internal/io" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" + uptoken "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +// 调用 API 所用的请求 +type Request struct { + BucketName string // 存储空间名称 + ObjectName *string // 对象名称 + UploadId string // 在服务端申请的 Multipart Upload 任务 id + PartNumber int64 // 每一个上传的分片都有一个标识它的号码 + Md5 string // 上传块内容的 md5 值,如果指定服务端会进行校验,不指定不校验 + UpToken uptoken.Provider // 上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken + Body io.ReadSeekCloser // 请求体 +} + +// 获取 API 所用的响应 +type Response struct { + Etag string // 上传块内容的 etag,用来标识块,completeMultipartUpload API 调用的时候作为参数进行文件合成 + Md5 string // 上传块内容的 MD5 值 +} + +// 返回本次上传的分片相关信息 +type NewPartInfo = Response +type jsonResponse struct { + Etag string `json:"etag"` // 上传块内容的 etag,用来标识块,completeMultipartUpload API 调用的时候作为参数进行文件合成 + Md5 string `json:"md5"` // 上传块内容的 MD5 值 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Etag: j.Etag, Md5: j.Md5}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Etag = nj.Etag + 
j.Md5 = nj.Md5 + return nil +} +func (j *Response) validate() error { + if j.Etag == "" { + return errors.MissingRequiredFieldError{Name: "Etag"} + } + if j.Md5 == "" { + return errors.MissingRequiredFieldError{Name: "Md5"} + } + return nil +} diff --git a/storagev2/apis/set_bucket_access_mode/api.go b/storagev2/apis/set_bucket_access_mode/api.go new file mode 100644 index 00000000..6952195c --- /dev/null +++ b/storagev2/apis/set_bucket_access_mode/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 设置存储空间的原图保护 +package set_bucket_access_mode + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 空间名称 + Mode int64 // 1 表示开启原图保护,0 表示关闭原图保护 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_bucket_cors_rules/api.go b/storagev2/apis/set_bucket_cors_rules/api.go new file mode 100644 index 00000000..6df71bed --- /dev/null +++ b/storagev2/apis/set_bucket_cors_rules/api.go @@ -0,0 +1,91 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 设置空间的跨域规则 +package set_bucket_cors_rules + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 指定空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + CORSRules CORSRules // 跨域规则列表 +} + +// 允许的域名列表 +type AllowedOriginHosts = []string + +// 允许的方法列表 +type AllowedMethods = []string + +// 允许的 Header 列表 +type AllowedHeaders = []string + +// 暴露的 Header 列表 +type ExposedHeaders = []string + +// 跨域规则 +type CORSRule struct { + AllowedOrigin AllowedOriginHosts // 允许的域名。必填;支持通配符 * ;*表示全部匹配;只有第一个 * 生效;需要设置 "Scheme";大小写敏感 + AllowedMethod AllowedMethods // 允许的方法。必填;不支持通配符;大小写不敏感; + AllowedHeader AllowedHeaders + ExposedHeader ExposedHeaders // 选填;不支持通配符;X-Log, X-Reqid 是默认会暴露的两个 header;其他的 header 如果没有设置,则不会暴露;大小写不敏感; + MaxAge int64 // 结果可以缓存的时间。选填;空则不缓存 +} +type jsonCORSRule struct { + AllowedOrigin AllowedOriginHosts `json:"allowed_origin"` // 允许的域名。必填;支持通配符 * ;*表示全部匹配;只有第一个 * 生效;需要设置 "Scheme";大小写敏感 + AllowedMethod AllowedMethods `json:"allowed_method"` // 允许的方法。必填;不支持通配符;大小写不敏感; + AllowedHeader AllowedHeaders `json:"allowed_header,omitempty"` + ExposedHeader ExposedHeaders `json:"exposed_header,omitempty"` // 选填;不支持通配符;X-Log, X-Reqid 是默认会暴露的两个 header;其他的 header 如果没有设置,则不会暴露;大小写不敏感; + MaxAge int64 `json:"max_age,omitempty"` // 结果可以缓存的时间。选填;空则不缓存 +} + +func (j *CORSRule) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonCORSRule{AllowedOrigin: j.AllowedOrigin, AllowedMethod: j.AllowedMethod, AllowedHeader: j.AllowedHeader, ExposedHeader: j.ExposedHeader, MaxAge: j.MaxAge}) +} +func (j *CORSRule) UnmarshalJSON(data []byte) error { + var nj jsonCORSRule + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.AllowedOrigin = nj.AllowedOrigin + j.AllowedMethod = 
nj.AllowedMethod + j.AllowedHeader = nj.AllowedHeader + j.ExposedHeader = nj.ExposedHeader + j.MaxAge = nj.MaxAge + return nil +} +func (j *CORSRule) validate() error { + if len(j.AllowedOrigin) == 0 { + return errors.MissingRequiredFieldError{Name: "AllowedOrigin"} + } + if len(j.AllowedMethod) == 0 { + return errors.MissingRequiredFieldError{Name: "AllowedMethod"} + } + return nil +} + +// 跨域规则列表 +type CORSRules []CORSRule + +func (j *Request) MarshalJSON() ([]byte, error) { + return json.Marshal(j.CORSRules) +} +func (j *Request) UnmarshalJSON(data []byte) error { + var array CORSRules + if err := json.Unmarshal(data, &array); err != nil { + return err + } + j.CORSRules = array + return nil +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_bucket_max_age/api.go b/storagev2/apis/set_bucket_max_age/api.go new file mode 100644 index 00000000..f229da61 --- /dev/null +++ b/storagev2/apis/set_bucket_max_age/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 设置存储空间的 cache-control: max-age 响应头 +package set_bucket_max_age + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 空间名称 + MaxAge int64 // maxAge 为 0 或者负数表示为默认值(31536000) + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_bucket_private/api.go b/storagev2/apis/set_bucket_private/api.go new file mode 100644 index 00000000..28cc1993 --- /dev/null +++ b/storagev2/apis/set_bucket_private/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 设置存储空间的访问权限 +package set_bucket_private + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Bucket string // 空间名称 + IsPrivate int64 // `0`: 公开,`1`: 私有 +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_bucket_quota/api.go b/storagev2/apis/set_bucket_quota/api.go new file mode 100644 index 00000000..09250dfa --- /dev/null +++ b/storagev2/apis/set_bucket_quota/api.go @@ -0,0 +1,17 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 设置用户存储空间配额限制 +package set_bucket_quota + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 指定存储空间 + Size int64 // 空间存储量配额,参数传入 0 或不传表示不更改当前配置,传入 -1 表示取消限额 + Count int64 // 空间文件数配额,参数传入 0 或不传表示不更改当前配置,传入 -1 表示取消限额 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_bucket_remark/api.go b/storagev2/apis/set_bucket_remark/api.go new file mode 100644 index 00000000..a4b5c826 --- /dev/null +++ b/storagev2/apis/set_bucket_remark/api.go @@ -0,0 +1,44 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 设置空间备注 +package set_bucket_remark + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Remark string // 空间备注信息, 字符长度不能超过 100, 允许为空 +} +type jsonRequest struct { + Remark string `json:"remark"` // 空间备注信息, 字符长度不能超过 100, 允许为空 +} + +func (j *Request) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonRequest{Remark: j.Remark}) +} +func (j *Request) UnmarshalJSON(data []byte) error { + var nj jsonRequest + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Remark = nj.Remark + return nil +} +func (j *Request) validate() error { + if j.Remark == "" { + return errors.MissingRequiredFieldError{Name: "Remark"} + } + return nil +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_bucket_taggings/api.go b/storagev2/apis/set_bucket_taggings/api.go new file mode 100644 index 00000000..f59d708c --- /dev/null +++ b/storagev2/apis/set_bucket_taggings/api.go @@ -0,0 +1,90 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 设置存储空间的标签列表,包括新增和修改 +package set_bucket_taggings + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 空间名称 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Tags Tags // 标签列表 +} + +// 标签键值对 +type TagInfo struct { + Key string // 标签名称,最大 64 Byte,不能为空且大小写敏感,不能以 kodo 为前缀(预留), 不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . _ : / @ + Value string // 标签值,最大 128 Byte,不能为空且大小写敏感,不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . 
_ : / @ +} +type jsonTagInfo struct { + Key string `json:"Key"` // 标签名称,最大 64 Byte,不能为空且大小写敏感,不能以 kodo 为前缀(预留), 不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . _ : / @ + Value string `json:"Value"` // 标签值,最大 128 Byte,不能为空且大小写敏感,不支持中文字符,可使用的字符有:字母,数字,空格,+ - = . _ : / @ +} + +func (j *TagInfo) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonTagInfo{Key: j.Key, Value: j.Value}) +} +func (j *TagInfo) UnmarshalJSON(data []byte) error { + var nj jsonTagInfo + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Key = nj.Key + j.Value = nj.Value + return nil +} +func (j *TagInfo) validate() error { + if j.Key == "" { + return errors.MissingRequiredFieldError{Name: "Key"} + } + if j.Value == "" { + return errors.MissingRequiredFieldError{Name: "Value"} + } + return nil +} + +// 标签列表 +type Tags = []TagInfo + +// 存储空间标签信息 +type TagsInfo = Request +type jsonRequest struct { + Tags Tags `json:"Tags"` // 标签列表 +} + +func (j *Request) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonRequest{Tags: j.Tags}) +} +func (j *Request) UnmarshalJSON(data []byte) error { + var nj jsonRequest + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Tags = nj.Tags + return nil +} +func (j *Request) validate() error { + if len(j.Tags) == 0 { + return errors.MissingRequiredFieldError{Name: "Tags"} + } + for _, value := range j.Tags { + if err := value.validate(); err != nil { + return err + } + } + return nil +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_buckets_mirror/api.go b/storagev2/apis/set_buckets_mirror/api.go new file mode 100644 index 00000000..d1f2a478 --- /dev/null +++ b/storagev2/apis/set_buckets_mirror/api.go @@ -0,0 +1,17 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 设置存储空间的镜像源 +package set_buckets_mirror + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Bucket string // 需要设定镜像源的目标空间名 + SrcSiteUrl string // 镜像源的访问域名,必须设置为形如 `http(s)://source.com` 或 `http(s)://114.114.114.114` 的字符串 + Host string // 回源时使用的 `Host` 头部值 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/set_object_file_type/api.go b/storagev2/apis/set_object_file_type/api.go new file mode 100644 index 00000000..246ecb8a --- /dev/null +++ b/storagev2/apis/set_object_file_type/api.go @@ -0,0 +1,16 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 修改文件的存储类型信息,可以实现标准存储、低频存储和归档存储之间的互相转换 +package set_object_file_type + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + Type int64 // `0` 表示标准存储;`1` 表示低频存储;`2` 表示归档存储 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apis/stat_object/api.go b/storagev2/apis/stat_object/api.go new file mode 100644 index 00000000..2a8fabaf --- /dev/null +++ b/storagev2/apis/stat_object/api.go @@ -0,0 +1,106 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! 
+ +// 仅获取对象的元信息,不返回对象的内容 +package stat_object + +import ( + "encoding/json" + credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + errors "github.com/qiniu/go-sdk/v7/storagev2/errors" +) + +// 调用 API 所用的请求 +type Request struct { + Entry string // 指定目标对象空间与目标对象名称,格式为 <目标对象空间>:<目标对象名称> + NeedParts bool // 如果文件是通过分片上传的,是否返回对应的分片信息 + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider +} + +// 获取 API 所用的响应 +type Response struct { + Size int64 // 对象大小,单位为字节 + Hash string // 对象哈希值 + MimeType string // 对象 MIME 类型 + Type int64 // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + PutTime int64 // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 + EndUser string // 资源内容的唯一属主标识 + RestoringStatus int64 // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段 + Status int64 // 文件状态。`1` 表示禁用;只有禁用状态的文件才会返回该字段 + Md5 string // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 + ExpirationTime int64 // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段 + TransitionToIaTime int64 // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段 + TransitionToArchiveTime int64 // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段 + TransitionToDeepArchiveTime int64 // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段 + TransitionToArchiveIrTime int64 // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段 + Metadata map[string]string // 对象存储元信息 + Parts PartSizes // 每个分片的大小,如没有指定 need_parts 参数则不返回 +} + +// 每个分片的大小 +type PartSizes = []int64 + +// 文件元信息 +type ObjectMetadata = Response +type jsonResponse struct { + Size int64 `json:"fsize,omitempty"` // 对象大小,单位为字节 + Hash string `json:"hash"` // 对象哈希值 + MimeType string `json:"mimeType"` // 对象 MIME 类型 + Type int64 `json:"type"` // 对象存储类型,`0` 表示普通存储,`1` 表示低频存储,`2` 表示归档存储 + PutTime int64 `json:"putTime"` // 文件上传时间,UNIX 时间戳格式,单位为 100 纳秒 + EndUser string `json:"endUser,omitempty"` // 资源内容的唯一属主标识 + RestoringStatus int64 `json:"restoreStatus,omitempty"` // 归档存储文件的解冻状态,`2` 表示解冻完成,`1` 表示解冻中;归档文件冻结时,不返回该字段 + Status int64 `json:"status,omitempty"` // 文件状态。`1` 
表示禁用;只有禁用状态的文件才会返回该字段 + Md5 string `json:"md5,omitempty"` // 对象 MD5 值,只有通过直传文件和追加文件 API 上传的文件,服务端确保有该字段返回 + ExpirationTime int64 `json:"expiration,omitempty"` // 文件过期删除日期,UNIX 时间戳格式,文件在设置过期时间后才会返回该字段 + TransitionToIaTime int64 `json:"transitionToIA,omitempty"` // 文件生命周期中转为低频存储的日期,UNIX 时间戳格式,文件在设置转低频后才会返回该字段 + TransitionToArchiveTime int64 `json:"transitionToARCHIVE,omitempty"` // 文件生命周期中转为归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段 + TransitionToDeepArchiveTime int64 `json:"transitionToDeepArchive,omitempty"` // 文件生命周期中转为深度归档存储的日期,UNIX 时间戳格式,文件在设置转归档后才会返回该字段 + TransitionToArchiveIrTime int64 `json:"transitionToArchiveIR,omitempty"` // 文件生命周期中转为归档直读存储的日期,UNIX 时间戳格式,文件在设置转归档直读后才会返回该字段 + Metadata map[string]string `json:"x-qn-meta,omitempty"` // 对象存储元信息 + Parts PartSizes `json:"parts,omitempty"` // 每个分片的大小,如没有指定 need_parts 参数则不返回 +} + +func (j *Response) MarshalJSON() ([]byte, error) { + if err := j.validate(); err != nil { + return nil, err + } + return json.Marshal(&jsonResponse{Size: j.Size, Hash: j.Hash, MimeType: j.MimeType, Type: j.Type, PutTime: j.PutTime, EndUser: j.EndUser, RestoringStatus: j.RestoringStatus, Status: j.Status, Md5: j.Md5, ExpirationTime: j.ExpirationTime, TransitionToIaTime: j.TransitionToIaTime, TransitionToArchiveTime: j.TransitionToArchiveTime, TransitionToDeepArchiveTime: j.TransitionToDeepArchiveTime, TransitionToArchiveIrTime: j.TransitionToArchiveIrTime, Metadata: j.Metadata, Parts: j.Parts}) +} +func (j *Response) UnmarshalJSON(data []byte) error { + var nj jsonResponse + if err := json.Unmarshal(data, &nj); err != nil { + return err + } + j.Size = nj.Size + j.Hash = nj.Hash + j.MimeType = nj.MimeType + j.Type = nj.Type + j.PutTime = nj.PutTime + j.EndUser = nj.EndUser + j.RestoringStatus = nj.RestoringStatus + j.Status = nj.Status + j.Md5 = nj.Md5 + j.ExpirationTime = nj.ExpirationTime + j.TransitionToIaTime = nj.TransitionToIaTime + j.TransitionToArchiveTime = nj.TransitionToArchiveTime + j.TransitionToDeepArchiveTime = 
nj.TransitionToDeepArchiveTime + j.TransitionToArchiveIrTime = nj.TransitionToArchiveIrTime + j.Metadata = nj.Metadata + j.Parts = nj.Parts + return nil +} +func (j *Response) validate() error { + if j.Hash == "" { + return errors.MissingRequiredFieldError{Name: "Hash"} + } + if j.MimeType == "" { + return errors.MissingRequiredFieldError{Name: "MimeType"} + } + if j.Type == 0 { + return errors.MissingRequiredFieldError{Name: "Type"} + } + if j.PutTime == 0 { + return errors.MissingRequiredFieldError{Name: "PutTime"} + } + return nil +} diff --git a/storagev2/apis/update_bucket_rules/api.go b/storagev2/apis/update_bucket_rules/api.go new file mode 100644 index 00000000..73a0a3d6 --- /dev/null +++ b/storagev2/apis/update_bucket_rules/api.go @@ -0,0 +1,22 @@ +// THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY! + +// 更新空间规则 +package update_bucket_rules + +import credentials "github.com/qiniu/go-sdk/v7/storagev2/credentials" + +// 调用 API 所用的请求 +type Request struct { + Credentials credentials.CredentialsProvider // 鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider + Bucket string // 空间名称 + Name string // 要修改的规则名称 + Prefix string // 指定匹配的对象名称前缀 + DeleteAfterDays int64 // 指定上传文件多少天后删除,指定为 0 表示不删除,大于 0 表示多少天后删除 + ToIaAfterDays int64 // 指定文件上传多少天后转低频存储。指定为 0 表示不转低频存储 + ToArchiveAfterDays int64 // 指定文件上传多少天后转归档存储。指定为 0 表示不转归档存储 + ToDeepArchiveAfterDays int64 // 指定文件上传多少天后转深度归档存储。指定为 0 表示不转深度归档存储 + ToArchiveIrAfterDays int64 // 指定文件上传多少天后转归档直读存储。指定为 0 表示不转归档直读存储 +} + +// 获取 API 所用的响应 +type Response struct{} diff --git a/storagev2/apistest/apis_test.go b/storagev2/apistest/apis_test.go new file mode 100644 index 00000000..4d3e83d2 --- /dev/null +++ b/storagev2/apistest/apis_test.go @@ -0,0 +1,107 @@ +//go:build integration +// +build integration + +package apistest_test + +import ( + "bytes" + "context" + "math/rand" + "os" + "testing" + "time" + + "github.com/qiniu/go-sdk/v7/client" + internal_io "github.com/qiniu/go-sdk/v7/internal/io" + 
"github.com/qiniu/go-sdk/v7/storagev2/apis" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/errors" + "github.com/qiniu/go-sdk/v7/storagev2/http_client" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +var ( + testAK = os.Getenv("accessKey") + testSK = os.Getenv("secretKey") + testBucket = os.Getenv("QINIU_TEST_BUCKET") + testDebug = os.Getenv("QINIU_SDK_DEBUG") + + testKey = "qiniu.png" +) + +func init() { + if testDebug == "true" { + client.TurnOnDebug() + } +} + +func TestMkBlk(t *testing.T) { + credentials := credentials.NewCredentials(testAK, testSK) + storageClient := apis.NewStorage(&http_client.Options{ + Credentials: credentials, + }) + putPolicy, err := uptoken.NewPutPolicy(testBucket, time.Now().Add(30*time.Minute)) + if err != nil { + t.Fatal(err) + } + + r := rand.New(rand.NewSource(time.Now().UnixNano())) + buf := make([]byte, 4*1024*1024) + if _, err = r.Read(buf); err != nil { + t.Fatal(err) + } + bufReader := bytes.NewReader(buf) + + if _, err = storageClient.ResumableUploadV1MakeBlock(context.Background(), &apis.ResumableUploadV1MakeBlockRequest{ + BlockSize: 4 * 1024 * 1024, + UpToken: uptoken.NewSigner(putPolicy, credentials), + Body: internal_io.NewReadSeekableNopCloser(bufReader), + }, nil); err != nil { + t.Fatal(err) + } + + if _, err = storageClient.ResumableUploadV1MakeBlock(context.Background(), &apis.ResumableUploadV1MakeBlockRequest{ + BlockSize: 4 * 1024 * 1024, + Body: internal_io.NewReadSeekableNopCloser(bufReader), + }, nil); err != nil { + if err.(errors.MissingRequiredFieldError).Name != "UpToken" { + t.FailNow() + } + } +} + +func TestCreateBucket(t *testing.T) { + credentials := credentials.NewCredentials(testAK, testSK) + _, err := apis.NewStorage(&http_client.Options{Credentials: credentials}).CreateBucket(context.Background(), &apis.CreateBucketRequest{ + Bucket: testBucket, + Region: "z0", + }, nil) + if err != nil { + if err.Error() != "the bucket already exists and you own 
it." { + t.Fatal(err) + } + } else { + t.FailNow() + } + + _, err = apis.NewStorage(nil).CreateBucket(context.Background(), &apis.CreateBucketRequest{ + Bucket: testBucket, + Credentials: credentials, + Region: "z0", + }, nil) + if err != nil { + if err.Error() != "the bucket already exists and you own it." { + t.Fatal(err) + } + } else { + t.FailNow() + } + + _, err = apis.NewStorage(nil).CreateBucket(context.Background(), &apis.CreateBucketRequest{ + Bucket: testBucket, + Region: "z0", + }, nil) + if err == nil || err.(errors.MissingRequiredFieldError).Name != "Credentials" { + t.FailNow() + } +} diff --git a/storagev2/credentials/credentials.go b/storagev2/credentials/credentials.go new file mode 100644 index 00000000..156f7cb3 --- /dev/null +++ b/storagev2/credentials/credentials.go @@ -0,0 +1,57 @@ +package credentials + +import ( + "context" + "errors" + "os" + + "github.com/qiniu/go-sdk/v7/auth" +) + +// Credentials 七牛鉴权类,用于生成Qbox, Qiniu, Upload签名 +// +// AK/SK可以从 https://portal.qiniu.com/user/key 获取 +type Credentials = auth.Credentials + +// NewCredentials 构建一个 Credentials 对象 +func NewCredentials(accessKey, secretKey string) *Credentials { + return auth.New(accessKey, secretKey) +} + +// CredentialsProvider 获取 Credentials 对象的接口 +type CredentialsProvider interface { + Get(context.Context) (*Credentials, error) +} + +// EnvironmentVariableCredentialProvider 从环境变量中获取 Credential +type EnvironmentVariableCredentialProvider struct{} + +func (provider *EnvironmentVariableCredentialProvider) Get(ctx context.Context) (credential *Credentials, err error) { + accessKey := os.Getenv("QINIU_ACCESS_KEY") + secretKey := os.Getenv("QINIU_SECRET_KEY") + if accessKey == "" { + return nil, errors.New("QINIU_ACCESS_KEY is not set") + } + if secretKey == "" { + return nil, errors.New("QINIU_SECRET_KEY is not set") + } + return NewCredentials(accessKey, secretKey), nil +} + +var _ CredentialsProvider = (*EnvironmentVariableCredentialProvider)(nil) + +// 
ChainedCredentialsProvider 存储多个 CredentialsProvider,逐个尝试直到成功获取第一个 Credentials 为止 +type ChainedCredentialsProvider struct { + providers []CredentialsProvider +} + +func (provider *ChainedCredentialsProvider) Get(ctx context.Context) (credential *Credentials, err error) { + for _, provider := range provider.providers { + if credential, err = provider.Get(ctx); err == nil { + return + } + } + return +} + +var _ CredentialsProvider = (*ChainedCredentialsProvider)(nil) diff --git a/storagev2/doc.go b/storagev2/doc.go new file mode 100644 index 00000000..058d8ea4 --- /dev/null +++ b/storagev2/doc.go @@ -0,0 +1,4 @@ +// storagev2 包提供了资源管理等功能。 +package storagev2 + +//go:generate go run ./internal/api-generator diff --git a/storagev2/errors/errors.go b/storagev2/errors/errors.go new file mode 100644 index 00000000..4858c4e7 --- /dev/null +++ b/storagev2/errors/errors.go @@ -0,0 +1,13 @@ +package errors + +import "fmt" + +type ( + MissingRequiredFieldError struct { + Name string + } +) + +func (err MissingRequiredFieldError) Error() string { + return fmt.Sprintf("missing required field `%s`", err.Name) +} diff --git a/storagev2/http_client/http_client.go b/storagev2/http_client/http_client.go new file mode 100644 index 00000000..3a11f064 --- /dev/null +++ b/storagev2/http_client/http_client.go @@ -0,0 +1,322 @@ +package http_client + +import ( + "context" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" + + "github.com/qiniu/go-sdk/v7/auth" + clientv1 "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/internal/hostprovider" + compatible_io "github.com/qiniu/go-sdk/v7/internal/io" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/region" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +var ( + ErrNoRegion = errors.New("no region from provider") + ErrNoEndpointsConfigured = errors.New("no endpoints configured") +) + +type ( + InterceptorPriority = 
clientv2.InterceptorPriority + Interceptor = clientv2.Interceptor + BasicHTTPClient = clientv2.Client + GetRequestBody = clientv2.GetRequestBody + RetryConfig = clientv2.RetryConfig + Handler = clientv2.Handler + + // Client 提供了对七牛 HTTP 客户端 + Client struct { + useHttps bool + basicHTTPClient BasicHTTPClient + bucketQuery region.BucketRegionsQuery + regions region.RegionsProvider + credentials credentials.CredentialsProvider + hostRetryConfig *RetryConfig + hostsRetryConfig *RetryConfig + hostFreezeDuration time.Duration + shouldFreezeHost func(req *http.Request, resp *http.Response, err error) bool + } + + // Options 为构建 Client 提供了可选参数 + Options struct { + BasicHTTPClient BasicHTTPClient + BucketQuery region.BucketRegionsQuery + Regions region.RegionsProvider + Credentials credentials.CredentialsProvider + Interceptors []Interceptor + UseInsecureProtocol bool + HostRetryConfig *RetryConfig + HostsRetryConfig *RetryConfig + HostFreezeDuration time.Duration + ShouldFreezeHost func(req *http.Request, resp *http.Response, err error) bool + } + + // Request 包含一个具体的 HTTP 请求的参数 + Request struct { + Method string + ServiceNames []region.ServiceName + Endpoints region.EndpointsProvider + Region region.RegionsProvider + Path string + RawQuery string + Query url.Values + Header http.Header + RequestBody GetRequestBody + Credentials credentials.CredentialsProvider + AuthType auth.TokenType + UpToken uptoken.UpTokenProvider + BufferResponse bool + } +) + +// NewClient 用来构建一个新的七牛 HTTP 客户端 +func NewClient(options *Options) *Client { + if options == nil { + options = &Options{} + } + if options.HostFreezeDuration < time.Millisecond { + options.HostFreezeDuration = 600 * time.Second + } + if options.ShouldFreezeHost == nil { + options.ShouldFreezeHost = defaultShouldFreezeHost + } + + return &Client{ + basicHTTPClient: clientv2.NewClient(options.BasicHTTPClient, options.Interceptors...), + useHttps: !options.UseInsecureProtocol, + bucketQuery: options.BucketQuery, + regions: 
options.Regions, + credentials: options.Credentials, + hostRetryConfig: options.HostRetryConfig, + hostsRetryConfig: options.HostsRetryConfig, + hostFreezeDuration: options.HostFreezeDuration, + shouldFreezeHost: options.ShouldFreezeHost, + } +} + +// Do 发送 HTTP 请求 +func (httpClient *Client) Do(ctx context.Context, request *Request) (*http.Response, error) { + req, err := httpClient.makeReq(ctx, request) + if err != nil { + return nil, err + } + if upTokenProvider := request.UpToken; upTokenProvider != nil { + if upToken, err := upTokenProvider.GetUpToken(ctx); err != nil { + return nil, err + } else { + req.Header.Set("Authorization", "UpToken "+upToken) + } + } else { + credentialsProvider := request.Credentials + if credentialsProvider == nil { + credentialsProvider = httpClient.credentials + } + if credentialsProvider != nil { + if credentials, err := credentialsProvider.Get(ctx); err != nil { + return nil, err + } else { + req = clientv2.WithInterceptors(req, clientv2.NewAuthInterceptor(clientv2.AuthConfig{ + Credentials: credentials, + TokenType: request.AuthType, + })) + } + } + } + return httpClient.basicHTTPClient.Do(req) +} + +// DoAndAcceptJSON 发送 HTTP 请求并接收 JSON 响应 +func (httpClient *Client) DoAndAcceptJSON(ctx context.Context, request *Request, ret interface{}) error { + if resp, err := httpClient.Do(ctx, request); err != nil { + return err + } else { + return clientv1.DecodeJsonFromReader(resp.Body, ret) + } +} + +func (httpClient *Client) GetBucketQuery() region.BucketRegionsQuery { + return httpClient.bucketQuery +} + +func (httpClient *Client) GetCredentials() credentials.CredentialsProvider { + return httpClient.credentials +} + +func (httpClient *Client) GetRegions() region.RegionsProvider { + return httpClient.regions +} + +func (httpClient *Client) GetClient() BasicHTTPClient { + return httpClient.basicHTTPClient +} + +func (httpClient *Client) UseInsecureProtocol() bool { + return !httpClient.useHttps +} + +func (httpClient *Client) 
GetHostFreezeDuration() time.Duration { + return httpClient.hostFreezeDuration +} + +func (httpClient *Client) GetHostRetryConfig() *RetryConfig { + return httpClient.hostRetryConfig +} + +func (httpClient *Client) GetHostsRetryConfig() *RetryConfig { + return httpClient.hostsRetryConfig +} + +func (httpClient *Client) getEndpoints(ctx context.Context, request *Request) (region.Endpoints, error) { + getEndpointsFromEndpointsProvider := func(ctx context.Context, endpoints region.EndpointsProvider) (region.Endpoints, error) { + return endpoints.GetEndpoints(ctx) + } + getEndpointsFromRegionsProvider := func(ctx context.Context, regions region.RegionsProvider, serviceNames []region.ServiceName) (region.Endpoints, error) { + rs, err := regions.GetRegions(ctx) + if err != nil { + return region.Endpoints{}, err + } else if len(rs) == 0 { + return region.Endpoints{}, ErrNoRegion + } + r := rs[0] + return r.Endpoints(request.ServiceNames) + } + if request.Endpoints != nil { + return getEndpointsFromEndpointsProvider(ctx, request.Endpoints) + } else if request.Region != nil && len(request.ServiceNames) > 0 { + return getEndpointsFromRegionsProvider(ctx, request.Region, request.ServiceNames) + } else if httpClient.regions != nil && len(request.ServiceNames) > 0 { + return getEndpointsFromRegionsProvider(ctx, httpClient.regions, request.ServiceNames) + } + return region.Endpoints{}, ErrNoEndpointsConfigured +} + +func (httpClient *Client) makeReq(ctx context.Context, request *Request) (*http.Request, error) { + endpoints, err := httpClient.getEndpoints(ctx, request) + if err != nil { + return nil, err + } + hostProvider := endpoints.ToHostProvider() + url, err := httpClient.generateUrl(request, hostProvider) + if err != nil { + return nil, err + } + + interceptors := make([]Interceptor, 0, 2) + hostsRetryConfig := httpClient.hostsRetryConfig + if hostsRetryConfig == nil { + hostsRetryConfig = &RetryConfig{ + RetryMax: len(endpoints.Preferred) + len(endpoints.Alternative), + } 
+ } + interceptors = append(interceptors, clientv2.NewHostsRetryInterceptor(clientv2.HostsRetryConfig{ + RetryConfig: *hostsRetryConfig, + HostProvider: hostProvider, + HostFreezeDuration: httpClient.hostFreezeDuration, + ShouldFreezeHost: httpClient.shouldFreezeHost, + })) + if httpClient.hostRetryConfig != nil { + interceptors = append(interceptors, clientv2.NewSimpleRetryInterceptor(*httpClient.hostRetryConfig)) + } + req, err := clientv2.NewRequest(clientv2.RequestParams{ + Context: ctx, + Method: request.Method, + Url: url, + Header: request.Header, + GetBody: request.RequestBody, + BufferResponse: request.BufferResponse, + }) + if err != nil { + return nil, err + } + return clientv2.WithInterceptors(req, interceptors...), nil +} + +func (httpClient *Client) generateUrl(request *Request, hostProvider hostprovider.HostProvider) (string, error) { + var url string + host, err := hostProvider.Provider() + if err != nil { + return "", err + } + if strings.Contains(host, "://") { + url = host + } else { + if httpClient.useHttps { + url = "https://" + } else { + url = "http://" + } + url += host + } + if !strings.HasPrefix(request.Path, "/") { + url += "/" + } + url += request.Path + if request.RawQuery != "" || request.Query != nil { + url += "?" 
+ var rawQuery string + if request.RawQuery != "" { + rawQuery = request.RawQuery + } + if request.Query != nil { + if rawQuery != "" { + rawQuery += "&" + } + rawQuery += request.Query.Encode() + } + url += rawQuery + } + return url, nil +} + +func (options *Options) SetBucketHosts(bucketHosts region.Endpoints) (err error) { + options.BucketQuery, err = region.NewBucketRegionsQuery(bucketHosts, nil) + return +} + +// GetFormRequestBody 将数据通过 Form 作为请求 Body 发送 +func GetFormRequestBody(info map[string][]string) GetRequestBody { + return clientv2.GetFormRequestBody(info) +} + +// GetJsonRequestBody 将数据通过 JSON 作为请求 Body 发送 +func GetJsonRequestBody(object interface{}) (GetRequestBody, error) { + return clientv2.GetJsonRequestBody(object) +} + +// MultipartForm 用来构建 Multipart 表单 +type MultipartForm = clientv2.MultipartForm + +// GetMultipartFormRequestBody 将数据通过 Multipart 表单作为请求 Body 发送 +func GetMultipartFormRequestBody(info *MultipartForm) GetRequestBody { + return clientv2.GetMultipartFormRequestBody(info) +} + +// GetRequestBodyFromReadSeekCloser 将二进制数据作为请求 Body 发送 +func GetRequestBodyFromReadSeekCloser(r compatible_io.ReadSeekCloser) GetRequestBody { + return func(*clientv2.RequestParams) (io.ReadCloser, error) { + _, err := r.Seek(0, io.SeekStart) + return r, err + } +} + +var defaultBucketHosts = region.Endpoints{ + Preferred: []string{"uc.qiniuapi.com", "kodo-config.qiniuapi.com"}, + Alternative: []string{"uc.qbox.me"}, +} + +// DefaultBucketHosts 默认的 Bucket 域名列表 +func DefaultBucketHosts() region.Endpoints { + return defaultBucketHosts.Clone() +} + +func defaultShouldFreezeHost(*http.Request, *http.Response, error) bool { + return true +} diff --git a/storagev2/http_client/http_client_test.go b/storagev2/http_client/http_client_test.go new file mode 100644 index 00000000..b092599d --- /dev/null +++ b/storagev2/http_client/http_client_test.go @@ -0,0 +1,147 @@ +//go:build unit +// +build unit + +package http_client + +import ( + "context" + "io" + "net/http" + 
"net/http/httptest" + "net/url" + "strings" + "testing" + + clientv1 "github.com/qiniu/go-sdk/v7/client" + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/region" +) + +func TestHttpClient(t *testing.T) { + type Req struct { + id int + url *url.URL + } + var reqs = make([]Req, 0, 3) + mux_1 := http.NewServeMux() + mux_1.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { + reqs = append(reqs, Req{id: 1, url: r.URL}) + if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { + t.Fatalf("Unexpected authorization: %s", auth) + } + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "test error") + }) + server_1 := httptest.NewServer(mux_1) + defer server_1.Close() + + mux_2 := http.NewServeMux() + mux_2.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { + reqs = append(reqs, Req{id: 2, url: r.URL}) + if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { + t.Fatalf("Unexpected authorization: %s", auth) + } + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "test error") + }) + server_2 := httptest.NewServer(mux_2) + defer server_2.Close() + + mux_3 := http.NewServeMux() + mux_3.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { + reqs = append(reqs, Req{id: 3, url: r.URL}) + if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { + t.Fatalf("Unexpected authorization: %s", auth) + } + w.WriteHeader(http.StatusInternalServerError) + io.WriteString(w, "test error") + }) + server_3 := httptest.NewServer(mux_3) + defer server_3.Close() + + httpClient := NewClient(&Options{ + Regions: ®ion.Region{ + Api: region.Endpoints{ + Preferred: []string{server_1.URL, server_2.URL}, + Alternative: []string{server_3.URL}, + }, + }, + }) + _, err := httpClient.Do(context.Background(), &Request{ + ServiceNames: []region.ServiceName{region.ServiceApi}, + Method: http.MethodGet, + 
Path: "/test", + RawQuery: "fakeRawQuery", + Query: url.Values{ + "x-query-1": {"x-value-1"}, + "x-query-2": {"x-value-2"}, + }, + Header: http.Header{ + "x-qiniu-1": {"x-value-1"}, + "x-qiniu-2": {"x-value-2"}, + }, + Credentials: credentials.NewCredentials("TestAk", "TestSk"), + }) + if err == nil { + t.Fatalf("Expected error") + } + if clientErr, ok := err.(*clientv1.ErrorInfo); ok { + if clientErr.Code != http.StatusInternalServerError { + t.Fatalf("Unexpected status code: %d", clientErr.Code) + } + } + if len(reqs) != 3 { + t.Fatalf("Unexpected reqs: %#v", reqs) + } + for i, req := range reqs { + if i+1 != req.id || req.url.String() != "/test?fakeRawQuery&x-query-1=x-value-1&x-query-2=x-value-2" { + t.Fatalf("Unexpected req: %#v", req) + } + } +} + +func TestHttpClientJson(t *testing.T) { + mux_1 := http.NewServeMux() + mux_1.HandleFunc("/test", func(w http.ResponseWriter, r *http.Request) { + if auth := r.Header.Get("Authorization"); !strings.HasPrefix(auth, "Qiniu TestAk:") { + t.Fatalf("Unexpected authorization: %s", auth) + } + io.WriteString(w, "{\"Test\":\"AccessKey\"}") + }) + server_1 := httptest.NewServer(mux_1) + defer server_1.Close() + + httpClient := NewClient(&Options{ + Regions: ®ion.Region{ + Api: region.Endpoints{ + Preferred: []string{server_1.URL}, + }, + }, + }) + + var body struct { + Test string `json:"Test"` + } + + err := httpClient.DoAndAcceptJSON(context.Background(), &Request{ + ServiceNames: []region.ServiceName{region.ServiceApi}, + Method: http.MethodGet, + Path: "/test", + RawQuery: "fakeRawQuery", + Query: url.Values{ + "x-query-1": {"x-value-1"}, + "x-query-2": {"x-value-2"}, + }, + Header: http.Header{ + "x-qiniu-1": {"x-value-1"}, + "x-qiniu-2": {"x-value-2"}, + }, + Credentials: credentials.NewCredentials("TestAk", "TestSk"), + }, &body) + if err != nil { + t.Fatalf("Unexpected error: %s", err) + } + if body.Test != "AccessKey" { + t.Fatalf("Unexpected body: %#v", body) + } +} diff --git a/storagev2/http_client/multipart.go 
b/storagev2/http_client/multipart.go new file mode 100644 index 00000000..1874bd22 --- /dev/null +++ b/storagev2/http_client/multipart.go @@ -0,0 +1,8 @@ +package http_client + +import compatible_io "github.com/qiniu/go-sdk/v7/internal/io" + +type MultipartFormBinaryData struct { + Data compatible_io.ReadSeekCloser + Name string +} diff --git a/storagev2/internal/api-generator/client.go b/storagev2/internal/api-generator/client.go new file mode 100644 index 00000000..3670c2cd --- /dev/null +++ b/storagev2/internal/api-generator/client.go @@ -0,0 +1,660 @@ +package main + +import ( + "strings" + + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" +) + +type ( + ApiDetailedDescription struct { + CamelCaseName string `yaml:"camel_case_name,omitempty"` + SnakeCaseName string `yaml:"snake_case_name,omitempty"` + Method MethodName `yaml:"method,omitempty"` + ServiceNames []ServiceName `yaml:"service_names,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + Command string `yaml:"command,omitempty"` + BasePath string `yaml:"base_path,omitempty"` + PathSuffix string `yaml:"path_suffix,omitempty"` + Request ApiRequestDescription `yaml:"request,omitempty"` + Response ApiResponseDescription `yaml:"response,omitempty"` + } + + CodeGeneratorOptions struct { + Name, Documentation string + CamelCaseName, SnakeCaseName string + apiDetailedDescription *ApiDetailedDescription + } +) + +func (options *CodeGeneratorOptions) camelCaseName() string { + if options.CamelCaseName != "" { + return options.CamelCaseName + } + return strcase.ToCamel(options.Name) +} + +func (description *ApiDetailedDescription) generateSubPackages(group *jen.Group, _ CodeGeneratorOptions) (err error) { + if body := description.Response.Body; body != nil { + if bodyJson := body.Json; bodyJson != nil && bodyJson.Any { + description.Request.responseTypeRequired = true + } + } + if err = description.Request.generate(group, CodeGeneratorOptions{ + Name: "Request", + Documentation: 
"调用 API 所用的请求", + apiDetailedDescription: description, + }); err != nil { + return + } + if err = description.Response.generate(group, CodeGeneratorOptions{ + Name: "Response", + Documentation: "获取 API 所用的响应", + apiDetailedDescription: description, + }); err != nil { + return + } + return +} + +func (description *ApiDetailedDescription) generatePackage(group *jen.Group, options CodeGeneratorOptions) (err error) { + packageName := PackageNameApis + "/" + options.Name + innerStructName := "inner" + options.camelCaseName() + "Request" + reexportedRequestStructName := options.camelCaseName() + "Request" + reexportedResponseStructName := options.camelCaseName() + "Response" + group.Add(jen.Type().Id(innerStructName).Qual(packageName, "Request")) + var getBucketNameGenerated bool + if getBucketNameGenerated, err = description.generateGetBucketNameFunc(group, innerStructName); err != nil { + return + } + if err = description.generateBuildFunc(group, innerStructName); err != nil { + return + } + if body := description.Request.Body; body != nil && body.Json != nil { + if err = description.addJsonMarshalerUnmarshaler(group, innerStructName, packageName, "Request"); err != nil { + return + } + } + if err = description.Request.generateGetAccessKeyFunc(group, innerStructName); err != nil { + return + } + group.Add(jen.Type().Id(reexportedRequestStructName).Op("=").Qual(packageName, "Request")) + group.Add(jen.Type().Id(reexportedResponseStructName).Op("=").Qual(packageName, "Response")) + if options.Documentation != "" { + group.Add(jen.Comment(options.Documentation)) + } + group.Add( + jen.Func(). + Params(jen.Id("storage").Op("*").Id("Storage")). + Id(options.camelCaseName()). + Params( + jen.Id("ctx").Qual("context", "Context"), + jen.Id("request").Op("*").Id(reexportedRequestStructName), + jen.Id("options").Op("*").Id("Options"), + ). + Params( + jen.Op("*").Id(reexportedResponseStructName), + jen.Error(), + ). 
+ BlockFunc(func(group *jen.Group) { + group.Add(jen.If(jen.Id("options").Op("==").Nil()).BlockFunc(func(group *jen.Group) { + group.Id("options").Op("=").Op("&").Id("Options").Values() + })) + group.Add(jen.Id("innerRequest").Op(":=").Parens(jen.Op("*").Id(innerStructName)).Parens(jen.Id("request"))) + group.Add( + jen.Id("serviceNames"). + Op(":="). + Index(). + Qual(PackageNameRegion, "ServiceName"). + ValuesFunc(func(group *jen.Group) { + for _, serviceName := range description.ServiceNames { + if code, e := serviceName.ToServiceName(); e != nil { + err = e + return + } else { + group.Add(code) + } + } + }), + ) + switch description.Request.Authorization.ToAuthorization() { + case AuthorizationQbox, AuthorizationQiniu: + group.Add(jen.If( + jen.Id("innerRequest").Dot("Credentials").Op("==").Nil().Op("&&"). + Id("storage").Dot("client").Dot("GetCredentials").Call().Op("==").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit("Credentials")) + }), + )) + })) + case AuthorizationUpToken: + group.Add(jen.If(jen.Id("innerRequest").Dot("UpToken").Op("==").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). 
+ ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit("UpToken")) + }), + )) + })) + } + if description.Request.HeaderNames != nil { + group.Add( + jen.List(jen.Id("headers"), jen.Err()).Op(":=").Id("innerRequest").Dot("buildHeaders").Call(), + ) + group.Add( + jen.If( + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + } + group.Add(jen.Var().Id("pathSegments").Index().String()) + if description.BasePath != "" { + group.Add(jen.Id("pathSegments").Op("=").AppendFunc(func(group *jen.Group) { + group.Add(jen.Id("pathSegments")) + for _, pathSegment := range description.getBasePathSegments() { + group.Add(jen.Lit(pathSegment)) + } + })) + } + if description.Request.PathParams != nil { + group.Add( + jen.If( + jen.List(jen.Id("segments"), jen.Err()). + Op(":="). + Id("innerRequest"). + Dot("buildPath"). + Call(), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }).Else().BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("pathSegments").Op("=").Append( + jen.Id("pathSegments"), + jen.Id("segments").Op("..."), + )) + }), + ) + } + if description.PathSuffix != "" { + group.Add(jen.Id("pathSegments").Op("=").AppendFunc(func(group *jen.Group) { + group.Add(jen.Id("pathSegments")) + for _, pathSegment := range description.getPathSuffixSegments() { + group.Add(jen.Lit(pathSegment)) + } + })) + } + group.Add(jen.Id("path").Op(":=").Lit("/").Op("+").Qual("strings", "Join").Call(jen.Id("pathSegments"), jen.Lit("/"))) + if description.Command != "" { + group.Add(jen.Id("rawQuery").Op(":=").Lit(description.Command + "&")) + } else { + group.Add(jen.Var().Id("rawQuery").String()) + } + if description.Request.QueryNames != nil { + group.Add( + jen.If( + jen.List(jen.Id("query"), jen.Err()).Op(":=").Id("innerRequest").Dot("buildQuery").Call(), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + 
group.Add(jen.Return(jen.Nil(), jen.Err())) + }).Else().BlockFunc(func(group *jen.Group) { + group.Id("rawQuery").Op("+=").Id("query").Dot("Encode").Call() + }), + ) + } + if requestBody := description.Request.Body; requestBody != nil { + if json := requestBody.Json; json != nil { + group.Add( + jen.List(jen.Id("body"), jen.Err()). + Op(":="). + Qual(PackageNameHTTPClient, "GetJsonRequestBody"). + Call(jen.Op("&").Id("innerRequest")), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + } else if multipartForm := requestBody.MultipartFormData; multipartForm != nil { + group.Add( + jen.List(jen.Id("body"), jen.Err()). + Op(":="). + Id("innerRequest"). + Dot("build"). + Call(jen.Id("ctx")), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + } else if form := requestBody.FormUrlencoded; form != nil { + group.Add( + jen.List(jen.Id("body"), jen.Err()). + Op(":="). + Id("innerRequest"). + Dot("build"). + Call(), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + } else if requestBody.BinaryData { + group.Add( + jen.Id("body").Op(":=").Id("innerRequest").Dot("Body"), + ) + group.Add( + jen.If(jen.Id("body").Op("==").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit("Body")) + }), + )) + }), + ) + } + } + method, err := description.Method.ToString() + if err != nil { + return + } + group.Add( + jen.Id("req"). + Op(":="). + Qual(PackageNameHTTPClient, "Request"). 
+ ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Method").Op(":").Lit(method)) + group.Add(jen.Id("ServiceNames").Op(":").Id("serviceNames")) + group.Add(jen.Id("Path").Op(":").Id("path")) + group.Add(jen.Id("RawQuery").Op(":").Id("rawQuery")) + group.Add(jen.Id("Endpoints").Op(":").Id("options").Dot("OverwrittenEndpoints")) + group.Add(jen.Id("Region").Op(":").Id("options").Dot("OverwrittenRegion")) + if description.Request.HeaderNames != nil { + group.Add(jen.Id("Header").Op(":").Id("headers")) + } + switch description.Request.Authorization.ToAuthorization() { + case AuthorizationQbox: + group.Add(jen.Id("AuthType").Op(":").Qual(PackageNameAuth, "TokenQBox")) + group.Add(jen.Id("Credentials").Op(":").Id("innerRequest").Dot("Credentials")) + case AuthorizationQiniu: + group.Add(jen.Id("AuthType").Op(":").Qual(PackageNameAuth, "TokenQiniu")) + group.Add(jen.Id("Credentials").Op(":").Id("innerRequest").Dot("Credentials")) + case AuthorizationUpToken: + group.Add(jen.Id("UpToken").Op(":").Id("innerRequest").Dot("UpToken")) + } + if body := description.Response.Body; body != nil { + if json := body.Json; json != nil { + group.Add(jen.Id("BufferResponse").Op(":").True()) + } + } + if requestBody := description.Request.Body; requestBody != nil { + if json := requestBody.Json; json != nil { + group.Add(jen.Id("RequestBody").Op(":").Id("body")) + } else if formUrlencoded := requestBody.FormUrlencoded; formUrlencoded != nil { + group.Add( + jen.Id("RequestBody"). + Op(":"). + Qual(PackageNameHTTPClient, "GetFormRequestBody"). + Call(jen.Id("body")), + ) + } else if multipartFormData := requestBody.MultipartFormData; multipartFormData != nil { + group.Add( + jen.Id("RequestBody"). + Op(":"). + Qual(PackageNameHTTPClient, "GetMultipartFormRequestBody"). + Call(jen.Id("body")), + ) + } else if requestBody.BinaryData { + group.Add( + jen.Id("RequestBody"). + Op(":"). + Qual(PackageNameHTTPClient, "GetRequestBodyFromReadSeekCloser"). 
+ Call(jen.Id("body")), + ) + } + } + }), + ) + group.Add( + jen.If( + jen.Id("options").Dot("OverwrittenEndpoints").Op("==").Nil().Op("&&"). + Id("options").Dot("OverwrittenRegion").Op("==").Nil().Op("&&"). + Id("storage").Dot("client").Dot("GetRegions").Call().Op("==").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("query").Op(":=").Id("storage").Dot("client").Dot("GetBucketQuery").Call()) + group.Add( + jen.If(jen.Id("query").Op("==").Nil()).BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("bucketHosts").Op(":=").Qual(PackageNameHTTPClient, "DefaultBucketHosts").Call()) + if description.isBucketService() { + group.Add( + jen.If(jen.Id("options").Dot("OverwrittenBucketHosts").Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("req").Dot("Endpoints").Op("=").Id("options").Dot("OverwrittenBucketHosts")) + }). + Else(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("req").Dot("Endpoints").Op("=").Id("bucketHosts")) + }), + ) + } else { + group.Add(jen.Var().Id("err").Error()) + group.Add( + jen.If(jen.Id("options").Dot("OverwrittenBucketHosts").Op("!=").Nil()).BlockFunc(func(group *jen.Group) { + group.Add( + jen.If( + jen.List(jen.Id("bucketHosts"), jen.Err()). + Op("="). + Id("options"). + Dot("OverwrittenBucketHosts"). + Dot("GetEndpoints"). + Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + }), + ) + group.Add( + jen.Id("queryOptions"). + Op(":="). + Qual(PackageNameRegion, "BucketRegionsQueryOptions"). 
+ ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("UseInsecureProtocol").Op(":").Id("storage").Dot("client").Dot("UseInsecureProtocol").Call()) + group.Add(jen.Id("HostFreezeDuration").Op(":").Id("storage").Dot("client").Dot("GetHostFreezeDuration").Call()) + group.Add(jen.Id("Client").Op(":").Id("storage").Dot("client").Dot("GetClient").Call()) + }), + ) + group.Add( + jen.If( + jen.Id("hostRetryConfig").Op(":=").Id("storage").Dot("client").Dot("GetHostRetryConfig").Call(), + jen.Id("hostRetryConfig").Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Id("queryOptions").Dot("RetryMax").Op("=").Id("hostRetryConfig").Dot("RetryMax") + }), + ) + group.Add( + jen.If( + jen.List(jen.Id("query"), jen.Err()). + Op("="). + Qual(PackageNameRegion, "NewBucketRegionsQuery"). + Call(jen.Id("bucketHosts"), jen.Op("&").Id("queryOptions")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + } + }), + ) + group.Add( + jen.If(jen.Id("query").Op("!=").Nil()).BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("bucketName").Op(":=").Id("options").Dot("OverwrittenBucketName")) + group.Add(jen.Var().Id("accessKey").String()) + group.Add(jen.Var().Err().Error()) + if getBucketNameGenerated { + group.Add( + jen.If(jen.Id("bucketName").Op("==").Lit("")).BlockFunc(func(group *jen.Group) { + group.Add( + jen.If( + jen.List(jen.Id("bucketName"), jen.Err()).Op("=").Id("innerRequest").Dot("getBucketName").Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + }), + ) + } + group.Add( + jen.If( + jen.List(jen.Id("accessKey"), jen.Err()).Op("=").Id("innerRequest").Dot("getAccessKey").Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Nil(), jen.Err()) + }), + ) + group.Add( + jen.If(jen.Id("accessKey").Op("==").Lit("")). 
+ BlockFunc(func(group *jen.Group) { + group.Add(jen.If( + jen.Id("credentialsProvider").Op(":=").Id("storage").Dot("client").Dot("GetCredentials").Call(), + jen.Id("credentialsProvider").Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.If( + jen.List(jen.Id("creds"), jen.Err()). + Op(":="). + Id("credentialsProvider"). + Dot("Get"). + Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Nil(), jen.Err()) + }).Else(). + If(jen.Id("creds").Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Id("accessKey").Op("=").Id("creds").Dot("AccessKey") + }) + })) + }), + ) + group.Add( + jen.If(jen.Id("accessKey").Op("!=").Lit("").Op("&&").Id("bucketName").Op("!=").Lit("")). + BlockFunc(func(group *jen.Group) { + group.Id("req").Dot("Region").Op("=").Id("query").Dot("Query").Call(jen.Id("accessKey"), jen.Id("bucketName")) + }), + ) + }), + ) + }), + ) + if body := description.Response.Body; body != nil { + if json := body.Json; json != nil { + if description.Request.responseTypeRequired { + group.Add(jen.Id("respBody").Op(":=").Id(reexportedResponseStructName).Values(jen.Id("Body").Op(":").Id("innerRequest").Dot("ResponseBody"))) + } else { + group.Add(jen.Var().Id("respBody").Id(reexportedResponseStructName)) + } + group.Add( + jen.If( + jen.Err(). + Op(":="). + Id("storage"). + Dot("client"). + Dot("DoAndAcceptJSON"). + Call( + jen.Id("ctx"), + jen.Op("&").Id("req"), + jen.Op("&").Id("respBody"), + ), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Nil(), jen.Err()) + }), + ) + group.Add(jen.Return(jen.Op("&").Id("respBody"), jen.Nil())) + } else if body.BinaryDataStream { + group.Add( + jen.List(jen.Id("resp"), jen.Err()). + Op(":="). + Id("storage"). + Dot("client"). + Dot("Do"). + Call( + jen.Id("ctx"), + jen.Op("&").Id("req"), + ), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). 
+ BlockFunc(func(group *jen.Group) { + group.Return(jen.Nil(), jen.Err()) + }), + ) + group.Add( + jen.Return( + jen.Op("&"). + Id(reexportedResponseStructName). + Values(jen.Id("Body"). + Op(":"). + Id("resp"). + Dot("Body")), jen.Nil(), + ), + ) + } + } else { + group.Add( + jen.List(jen.Id("resp"), jen.Err()). + Op(":="). + Id("storage"). + Dot("client"). + Dot("Do"). + Call( + jen.Id("ctx"), + jen.Op("&").Id("req"), + ), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Return(jen.Nil(), jen.Err()) + }), + ) + group.Add(jen.Return(jen.Op("&").Id(reexportedResponseStructName).Values(), jen.Id("resp").Dot("Body").Dot("Close").Call())) + } + }), + ) + return +} + +func (description *ApiDetailedDescription) generateGetBucketNameFunc(group *jen.Group, structName string) (ok bool, err error) { + if pp := description.Request.PathParams; pp != nil { + if ok, err = pp.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } + if queryNames := description.Request.QueryNames; queryNames != nil { + if ok, err = queryNames.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } + if headerNames := description.Request.HeaderNames; headerNames != nil { + if ok, err = headerNames.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } + if authorization := description.Request.Authorization; authorization != nil { + if ok, err = authorization.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } + if body := description.Request.Body; body != nil { + if json := body.Json; json != nil { + if jsonStruct := json.Struct; jsonStruct != nil { + if ok, err = jsonStruct.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } + } else if formUrlencoded := body.FormUrlencoded; formUrlencoded != nil { + if ok, err = formUrlencoded.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } else if multipartFormData := 
body.MultipartFormData; multipartFormData != nil { + if ok, err = multipartFormData.addGetBucketNameFunc(group, structName); err != nil || ok { + return + } + } + } + return +} + +func (description *ApiDetailedDescription) generateBuildFunc(group *jen.Group, structName string) (err error) { + if pp := description.Request.PathParams; pp != nil { + if err = pp.addBuildFunc(group, structName); err != nil { + return + } + } + if queryNames := description.Request.QueryNames; queryNames != nil { + if err = queryNames.addBuildFunc(group, structName); err != nil { + return + } + } + if headerNames := description.Request.HeaderNames; headerNames != nil { + if err = headerNames.addBuildFunc(group, structName); err != nil { + return + } + } + if body := description.Request.Body; body != nil { + if formUrlencoded := body.FormUrlencoded; formUrlencoded != nil { + if err = formUrlencoded.addBuildFunc(group, structName); err != nil { + return + } + } else if multipartFormData := body.MultipartFormData; multipartFormData != nil { + if err = multipartFormData.addBuildFunc(group, structName); err != nil { + return + } + } + } + return +} + +func (description *ApiDetailedDescription) addJsonMarshalerUnmarshaler(group *jen.Group, structName, actualPackageName, actualStructName string) (err error) { + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("MarshalJSON"). + Params(). + Params(jen.Index().Byte(), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Return(jen.Qual("encoding/json", "Marshal").Call(jen.Parens(jen.Op("*").Qual(actualPackageName, actualStructName)).Parens(jen.Id("j")))) + }), + ) + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("UnmarshalJSON"). + Params(jen.Id("data").Index().Byte()). + Params(jen.Error()). 
+ BlockFunc(func(group *jen.Group) { + group.Return(jen.Qual("encoding/json", "Unmarshal").Call(jen.Id("data"), jen.Parens(jen.Op("*").Qual(actualPackageName, actualStructName)).Parens(jen.Id("j")))) + }), + ) + return +} + +func (description *ApiDetailedDescription) getBasePathSegments() []string { + basePath := strings.TrimPrefix(description.BasePath, "/") + basePath = strings.TrimSuffix(basePath, "/") + return strings.Split(basePath, "/") +} + +func (description *ApiDetailedDescription) getPathSuffixSegments() []string { + pathSuffix := strings.TrimPrefix(description.PathSuffix, "/") + pathSuffix = strings.TrimSuffix(pathSuffix, "/") + return strings.Split(pathSuffix, "/") +} + +func (description *ApiDetailedDescription) isBucketService() bool { + for _, service := range description.ServiceNames { + if service == ServiceNameBucket { + return true + } + } + return false +} diff --git a/storagev2/internal/api-generator/form.go b/storagev2/internal/api-generator/form.go new file mode 100644 index 00000000..b72ff489 --- /dev/null +++ b/storagev2/internal/api-generator/form.go @@ -0,0 +1,217 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" +) + +type ( + FormUrlencodedRequestStruct struct { + Fields []FormUrlencodedRequestField `yaml:"fields,omitempty"` + } + + FormUrlencodedRequestField struct { + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + Key string `yaml:"key,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + Type *StringLikeType `yaml:"type,omitempty"` + Multiple bool `yaml:"multiple,omitempty"` + Optional *OptionalType `yaml:"optional,omitempty"` + ServiceBucket *ServiceBucketType `yaml:"service_bucket,omitempty"` + } +) + +func (field *FormUrlencodedRequestField) camelCaseName() string { + if field.FieldCamelCaseName != "" { + return 
field.FieldCamelCaseName + } + return strcase.ToCamel(field.FieldName) +} + +func (form *FormUrlencodedRequestStruct) addFields(group *jen.Group) error { + for _, field := range form.Fields { + if err := form.generateField(group, field); err != nil { + return err + } + } + return nil +} + +func (form *FormUrlencodedRequestStruct) addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + field := form.getServiceBucketField() + if field == nil || field.ServiceBucket.ToServiceBucketType() == ServiceBucketTypeNone { + return false, nil + } else if field.Multiple { + panic(fmt.Sprintf("multiple service bucket fields: %s", field.FieldName)) + } else if t := field.Type.ToStringLikeType(); t != StringLikeTypeString { + panic(fmt.Sprintf("service bucket field must be string: %s", t)) + } + group.Add(jen.Func(). + Params(jen.Id("form").Op("*").Id(structName)). + Id("getBucketName"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.String(), jen.Error()). + BlockFunc(func(group *jen.Group) { + fieldName := field.camelCaseName() + switch field.ServiceBucket.ToServiceBucketType() { + case ServiceBucketTypePlainText: + group.Add(jen.Return(jen.Id("form").Dot(fieldName), jen.Nil())) + case ServiceBucketTypeEntry: + group.Add( + jen.Return( + jen.Qual("strings", "SplitN"). + Call(jen.Id("form").Dot(fieldName), jen.Lit(":"), jen.Lit(2)). + Index(jen.Lit(0)), + jen.Nil(), + ), + ) + case ServiceBucketTypeUploadToken: + group.Add( + jen.If(jen.Id("putPolicy"), jen.Err()). + Op(":="). + Qual(PackageNameUpToken, "NewParser"). + Call(jen.Id("form").Dot(fieldName)). + Dot("GetPutPolicy"). + Call(jen.Id("ctx")). + Op(";"). + Err(). + Op("!="). + Nil(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Lit(""), jen.Err())) + }). + Else(). 
+ BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Id("putPolicy").Dot("GetBucketName").Call())) + }), + ) + default: + panic("unknown ServiceBucketType") + } + })) + return true, nil +} + +func (form *FormUrlencodedRequestStruct) addBuildFunc(group *jen.Group, structName string) error { + var finalErr error = nil + group.Add( + jen.Func(). + Params(jen.Id("form").Op("*").Id(structName)). + Id("build"). + Params(). + Params(jen.Qual("net/url", "Values"), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("formValues").Op(":=").Make(jen.Qual("net/url", "Values"))) + for _, field := range form.Fields { + if err := form.addSetCall(group, field, "form", "formValues"); err != nil { + finalErr = err + return + } + } + group.Add(jen.Return(jen.Id("formValues"), jen.Nil())) + }), + ) + return finalErr +} + +func (form *FormUrlencodedRequestStruct) getServiceBucketField() *FormUrlencodedRequestField { + var serviceBucketField *FormUrlencodedRequestField + + for i := range form.Fields { + if form.Fields[i].ServiceBucket.ToServiceBucketType() != ServiceBucketTypeNone { + if serviceBucketField == nil { + serviceBucketField = &form.Fields[i] + } else { + panic(fmt.Sprintf("multiple service bucket fields: %s & %s", form.Fields[i].FieldName, serviceBucketField.FieldName)) + } + } + } + return serviceBucketField +} + +func (form *FormUrlencodedRequestStruct) generateField(group *jen.Group, field FormUrlencodedRequestField) error { + code := jen.Id(field.camelCaseName()) + if field.Multiple { + code = code.Index() + } + code, err := field.Type.AddTypeToStatement(code, field.Optional.ToOptionalType() == OptionalTypeNullable) + if err != nil { + return err + } + if field.Documentation != "" { + code = code.Comment(field.Documentation) + } + group.Add(code) + return nil +} + +func (form *FormUrlencodedRequestStruct) addSetCall(group *jen.Group, field FormUrlencodedRequestField, formVarName, formValuesVarName string) error { + var ( + code, 
valueConvertCode *jen.Statement + err error + ) + fieldName := field.camelCaseName() + if field.Multiple { + if field.Optional.ToOptionalType() != OptionalTypeRequired { + return errors.New("multiple field must be required") + } + valueConvertCode, err = field.Type.GenerateConvertCodeToString(jen.Id("value")) + if err != nil { + return err + } + code = jen.If(jen.Len(jen.Id(formVarName).Dot(fieldName)).Op(">").Lit(0)). + BlockFunc(func(group *jen.Group) { + group.Add( + jen.For(jen.List(jen.Id("_"), jen.Id("value")).Op(":=").Range().Add(jen.Id(formVarName).Dot(fieldName))).BlockFunc(func(group *jen.Group) { + group.Add(jen.Id(formValuesVarName).Dot("Add").Call(jen.Lit(field.Key), valueConvertCode)) + }), + ) + }) + } else { + formField := jen.Id(formVarName).Dot(fieldName) + if field.Optional.ToOptionalType() == OptionalTypeNullable { + valueConvertCode, err = field.Type.GenerateConvertCodeToString(jen.Op("*").Add(formField)) + } else { + valueConvertCode, err = field.Type.GenerateConvertCodeToString(formField) + } + if err != nil { + return err + } + zeroValue, err := field.Type.ZeroValue() + if err != nil { + return err + } + condition := formField.Clone() + if field.Optional.ToOptionalType() == OptionalTypeNullable { + condition = condition.Op("!=").Nil() + } else if v, ok := zeroValue.(bool); !ok || v { + condition = condition.Op("!=").Lit(zeroValue) + } + switch field.Optional.ToOptionalType() { + case OptionalTypeOmitEmpty, OptionalTypeRequired, OptionalTypeNullable: + code = jen.If(condition).BlockFunc(func(group *jen.Group) { + group.Add(jen.Id(formValuesVarName).Dot("Set").Call(jen.Lit(field.Key), valueConvertCode)) + }) + case OptionalTypeKeepEmpty: + code = jen.Id(formValuesVarName).Dot("Set").Call(jen.Lit(field.Key), valueConvertCode) + } + } + if field.Optional.ToOptionalType() == OptionalTypeRequired { + code = code.Else().BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, 
"MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(field.camelCaseName())) + }), + )) + }) + } + group.Add(code) + return nil +} diff --git a/storagev2/internal/api-generator/headers.go b/storagev2/internal/api-generator/headers.go new file mode 100644 index 00000000..932a2d53 --- /dev/null +++ b/storagev2/internal/api-generator/headers.go @@ -0,0 +1,102 @@ +package main + +import ( + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" +) + +type ( + HeaderName struct { + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + HeaderName string `yaml:"header_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + Optional *OptionalType `yaml:"optional,omitempty"` + } + HeaderNames []HeaderName +) + +func (name *HeaderName) camelCaseName() string { + if name.FieldCamelCaseName != "" { + return name.FieldCamelCaseName + } + return strcase.ToCamel(name.FieldName) +} + +func (names HeaderNames) addFields(group *jen.Group) error { + for _, headerName := range names { + code := jen.Id(headerName.camelCaseName()) + if headerName.Optional.ToOptionalType() == OptionalTypeNullable { + code = code.Op("*") + } + code = code.String() + if headerName.Documentation != "" { + code = code.Comment(headerName.Documentation) + } + group.Add(code) + } + return nil +} + +func (names HeaderNames) addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + return false, nil +} + +func (names HeaderNames) addBuildFunc(group *jen.Group, structName string) error { + group.Add( + jen.Func(). + Params(jen.Id("headers").Op("*").Id(structName)). + Id("buildHeaders"). + Params(). + Params(jen.Qual("net/http", "Header"), jen.Error()). 
+ BlockFunc(func(group *jen.Group) { + group.Add( + jen.Id("allHeaders").Op(":=").Make(jen.Qual("net/http", "Header")), + ) + for _, headerName := range names { + fieldName := headerName.camelCaseName() + cond := jen.Id("headers").Dot(fieldName) + if headerName.Optional.ToOptionalType() == OptionalTypeNullable { + cond = cond.Op("!=").Nil() + } else { + cond = cond.Op("!=").Lit("") + } + setHeaderFunc := func(headerName, fieldName string) func(*jen.Group) { + return func(group *jen.Group) { + group.Add(jen.Id("allHeaders").Dot("Set").Call(jen.Lit(headerName), jen.Id("headers").Dot(fieldName))) + } + } + appendMissingRequiredFieldErrorFunc := func(fieldName string) func(group *jen.Group) { + return func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(fieldName)) + }), + )) + } + } + switch headerName.Optional.ToOptionalType() { + case OptionalTypeRequired: + group.Add( + jen.If(cond). + BlockFunc(setHeaderFunc(headerName.HeaderName, fieldName)). + Else(). + BlockFunc(appendMissingRequiredFieldErrorFunc(fieldName)), + ) + case OptionalTypeOmitEmpty, OptionalTypeNullable: + group.Add( + jen.If(cond). 
+ BlockFunc(setHeaderFunc(headerName.HeaderName, fieldName)), + ) + case OptionalTypeKeepEmpty: + setHeaderFunc(headerName.HeaderName, fieldName)(group) + } + } + group.Add(jen.Return(jen.Id("allHeaders"), jen.Nil())) + }), + ) + return nil +} diff --git a/storagev2/internal/api-generator/json.go b/storagev2/internal/api-generator/json.go new file mode 100644 index 00000000..a3a99e91 --- /dev/null +++ b/storagev2/internal/api-generator/json.go @@ -0,0 +1,547 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" + "gopkg.in/yaml.v3" +) + +type ( + JsonType struct { + String bool + Integer bool + Float bool + Boolean bool + Array *JsonArray + Struct *JsonStruct + Any bool + StringMap bool + } + + JsonArray struct { + Type *JsonType `yaml:"type,omitempty"` + Name string `yaml:"name,omitempty"` + CamelCaseName string `yaml:"camel_case_name,omitempty"` + SnakeCaseName string `yaml:"snake_case_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + } + + JsonStruct struct { + Fields []JsonField `yaml:"fields,omitempty"` + Name string `yaml:"name,omitempty"` + CamelCaseName string `yaml:"camel_case_name,omitempty"` + SnakeCaseName string `yaml:"snake_case_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + } + + JsonField struct { + Type JsonType `yaml:"type,omitempty"` + Key string `yaml:"key,omitempty"` + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + Optional *OptionalType `yaml:"optional,omitempty"` + ServiceBucket *ServiceBucketType `yaml:"service_bucket,omitempty"` + } +) + +func (jsonField *JsonField) camelCaseName() string { + if jsonField.FieldCamelCaseName != "" { + return jsonField.FieldCamelCaseName + } + return strcase.ToCamel(jsonField.FieldName) +} + +func 
(jsonStruct *JsonStruct) camelCaseName() string { + if jsonStruct.CamelCaseName != "" { + return jsonStruct.CamelCaseName + } + return strcase.ToCamel(jsonStruct.Name) +} + +func (jsonStruct *JsonStruct) addFields(group *jen.Group, includesJsonTag bool) error { + for _, field := range jsonStruct.Fields { + code, err := field.Type.AddTypeToStatement(jen.Id(field.camelCaseName()), field.Optional.ToOptionalType() == OptionalTypeNullable) + if err != nil { + return err + } + if includesJsonTag { + jsonTag := field.Key + switch field.Optional.ToOptionalType() { + case OptionalTypeOmitEmpty, OptionalTypeNullable: + jsonTag += ",omitempty" + } + code = code.Tag(map[string]string{"json": jsonTag}) + } + if field.Documentation != "" { + code = code.Comment(field.Documentation) + } + group.Add(code) + } + return nil +} + +func (jsonStruct *JsonStruct) addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + field := jsonStruct.getServiceBucketField() + + if field == nil || field.ServiceBucket.ToServiceBucketType() == ServiceBucketTypeNone { + return false, nil + } else if !field.Type.String { + panic("service bucket field must be string") + } + + group.Add(jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("getBucketName"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.String(), jen.Error()). + BlockFunc(func(group *jen.Group) { + fieldName := field.camelCaseName() + switch field.ServiceBucket.ToServiceBucketType() { + case ServiceBucketTypePlainText: + group.Add(jen.Return(jen.Id("j").Dot(fieldName), jen.Nil())) + case ServiceBucketTypeEntry: + group.Add( + jen.Return( + jen.Qual("strings", "SplitN"). + Call(jen.Id("j").Dot(fieldName), jen.Lit(":"), jen.Lit(2)). + Index(jen.Lit(0)), + jen.Nil(), + ), + ) + case ServiceBucketTypeUploadToken: + group.Add( + jen.If(jen.Id("putPolicy"), jen.Err()). + Op(":="). + Qual(PackageNameUpToken, "NewParser"). + Call(jen.Id("j").Dot(fieldName)). + Dot("GetPutPolicy"). 
+ Call(jen.Id("ctx")). + Op(";"). + Err(). + Op("!="). + Nil(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Lit(""), jen.Err())) + }). + Else(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Id("putPolicy").Dot("GetBucketName").Call())) + }), + ) + default: + panic("unknown ServiceBucketType") + } + })) + return true, nil +} + +func (jsonType *JsonType) generate(group *jen.Group, options CodeGeneratorOptions) error { + return jsonType.generateType(group, options, true, func() error { + if jsonType.Any { + return jsonType.addAnyJsonMarshalerUnmarshaler(group, options.camelCaseName()) + } + return errors.New("base type could not be top level") + }) +} + +func (jsonType *JsonType) generateType(group *jen.Group, options CodeGeneratorOptions, topLevel bool, otherWise func() error) error { + if s := jsonType.Struct; s != nil { + if err := s.generate(group, options, topLevel); err != nil { + return err + } + return nil + } else if a := jsonType.Array; a != nil { + if err := a.generate(group, options, topLevel); err != nil { + return err + } + return nil + } + return otherWise() +} + +func (jsonType *JsonType) AddTypeToStatement(statement *jen.Statement, nilable bool) (*jen.Statement, error) { + if nilable { + statement = statement.Op("*") + } + if jsonType.String { + return statement.String(), nil + } else if jsonType.Integer { + return statement.Int64(), nil + } else if jsonType.Float { + return statement.Float64(), nil + } else if jsonType.Boolean { + return statement.Bool(), nil + } else if jsonType.Any { + return statement.Interface(), nil + } else if jsonType.StringMap { + return statement.Map(jen.String()).String(), nil + } else if jsonType.Array != nil { + return statement.Id(jsonType.Array.camelCaseName()), nil + } else if jsonType.Struct != nil { + return statement.Id(jsonType.Struct.camelCaseName()), nil + } else { + return nil, errors.New("unknown type") + } +} + +func (jsonType *JsonType) ZeroValue() interface{} { + if 
jsonType.String { + return "" + } else if jsonType.Integer { + return 0 + } else if jsonType.Float { + return 0.0 + } else if jsonType.Boolean { + return false + } else { + return nil + } +} + +func (jsonType *JsonType) addAnyJsonMarshalerUnmarshaler(group *jen.Group, structName string) (err error) { + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("MarshalJSON"). + Params(). + Params(jen.Index().Byte(), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Qual("encoding/json", "Marshal"). + Call(jen.Id("j").Dot("Body")), + )) + }), + ) + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("UnmarshalJSON"). + Params(jen.Id("data").Index().Byte()). + Params(jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Return().Qual("encoding/json", "Unmarshal").Call(jen.Id("data"), jen.Op("&").Id("j").Dot("Body")) + }), + ) + return +} + +func (jsonArray *JsonArray) camelCaseName() string { + if jsonArray.CamelCaseName != "" { + return jsonArray.CamelCaseName + } + return strcase.ToCamel(jsonArray.Name) +} + +func (jsonArray *JsonArray) addFields(group *jen.Group) error { + code := jen.Id(jsonArray.camelCaseName()).Id(jsonArray.camelCaseName()) + if jsonArray.Documentation != "" { + code = code.Comment(jsonArray.Documentation) + } + group.Add(code) + return nil +} + +func (jsonArray *JsonArray) generate(group *jen.Group, options CodeGeneratorOptions, topLevel bool) (err error) { + if err = jsonArray.Type.generateType(group, CodeGeneratorOptions{}, false, func() error { + return nil + }); err != nil { + return + } + + if jsonArray.Documentation != "" { + group.Add(jen.Comment(jsonArray.Documentation)) + } + code := jen.Type().Id(jsonArray.camelCaseName()) + if !topLevel { + code = code.Op("=") + } + code = code.Index() + code, err = jsonArray.Type.AddTypeToStatement(code, false) + if err != nil { + return + } + group.Add(code) + if topLevel { + if err = 
jsonArray.addJsonMarshalerUnmarshaler(group, options.camelCaseName()); err != nil { + return + } + } + + return +} + +func (jsonArray *JsonArray) addJsonMarshalerUnmarshaler(group *jen.Group, structName string) (err error) { + fieldName := jsonArray.camelCaseName() + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("MarshalJSON"). + Params(). + Params(jen.Index().Byte(), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Qual("encoding/json", "Marshal"). + Call(jen.Id("j").Dot(fieldName)), + )) + }), + ) + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("UnmarshalJSON"). + Params(jen.Id("data").Index().Byte()). + Params(jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Var().Id("array").Id(fieldName)) + group.Add( + jen.If( + jen.Err().Op(":=").Qual("encoding/json", "Unmarshal").Call(jen.Id("data"), jen.Op("&").Id("array")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Err()) + }), + ) + group.Add(jen.Id("j").Dot(fieldName).Op("=").Id("array")) + group.Add(jen.Return(jen.Nil())) + }), + ) + return +} + +func (jsonStruct *JsonStruct) generate(group *jen.Group, options CodeGeneratorOptions, topLevel bool) (err error) { + for _, field := range jsonStruct.Fields { + if err = field.Type.generateType(group, CodeGeneratorOptions{ + Name: field.FieldName, + CamelCaseName: field.FieldCamelCaseName, + SnakeCaseName: field.FieldSnakeCaseName, + Documentation: field.Documentation, + }, false, func() error { + return nil + }); err != nil { + return + } + } + + opts := make([]CodeGeneratorOptions, 0, 2) + if options.camelCaseName() != "" { + opts = append(opts, options) + } + if jsonStruct.camelCaseName() != "" && options.camelCaseName() != jsonStruct.camelCaseName() { + opts = append(opts, CodeGeneratorOptions{ + Name: jsonStruct.Name, + CamelCaseName: jsonStruct.CamelCaseName, + SnakeCaseName: jsonStruct.SnakeCaseName, + Documentation: 
jsonStruct.Documentation, + }) + } + if len(opts) == 0 { + return errors.New("unknown struct name") + } + + if !topLevel { + if opts[0].Documentation != "" { + group.Add(jen.Comment(opts[0].Documentation)) + } + group.Add(jen.Type().Id(opts[0].camelCaseName()).StructFunc(func(group *jen.Group) { + err = jsonStruct.addFields(group, false) + })) + if err != nil { + return + } + } + + if len(opts) > 1 { + if opts[1].Documentation != "" { + group.Add(jen.Comment(opts[1].Documentation)) + } + group.Add(jen.Type().Id(opts[1].camelCaseName()).Op("=").Id(opts[0].camelCaseName())) + } + + if err = jsonStruct.addJsonMarshalerUnmarshaler(group, opts[0].camelCaseName()); err != nil { + return + } + if err = jsonStruct.generateValidateFunc(group, opts[0].camelCaseName()); err != nil { + return + } + + return +} + +func (jsonStruct *JsonStruct) addJsonMarshalerUnmarshaler(group *jen.Group, structName string) (err error) { + group.Add(jen.Type().Id("json" + structName).StructFunc(func(group *jen.Group) { + err = jsonStruct.addFields(group, true) + })) + if err != nil { + return + } + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("MarshalJSON"). + Params(). + Params(jen.Index().Byte(), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.If( + jen.Err().Op(":=").Id("j").Dot("validate").Call(), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Nil(), jen.Err()) + })) + group.Add(jen.Return( + jen.Qual("encoding/json", "Marshal"). + Call( + jen.Op("&").Id("json" + structName). + ValuesFunc(func(group *jen.Group) { + for _, field := range jsonStruct.Fields { + fieldName := field.camelCaseName() + group.Add(jen.Id(fieldName).Op(":").Id("j").Dot(fieldName)) + } + }), + ), + )) + }), + ) + group.Add( + jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("UnmarshalJSON"). + Params(jen.Id("data").Index().Byte()). + Params(jen.Error()). 
+ BlockFunc(func(group *jen.Group) { + group.Add(jen.Var().Id("nj").Id("json" + structName)) + group.Add( + jen.If( + jen.Err().Op(":=").Qual("encoding/json", "Unmarshal").Call(jen.Id("data"), jen.Op("&").Id("nj")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Err()) + }), + ) + for _, field := range jsonStruct.Fields { + fieldName := field.camelCaseName() + group.Add(jen.Id("j").Dot(fieldName).Op("=").Id("nj").Dot(fieldName)) + } + group.Add(jen.Return(jen.Nil())) + }), + ) + return +} + +func (jsonStruct *JsonStruct) generateValidateFunc(group *jen.Group, structName string) error { + group.Add(jen.Func(). + Params(jen.Id("j").Op("*").Id(structName)). + Id("validate"). + Params(). + Params(jen.Error()). + BlockFunc(func(group *jen.Group) { + for _, field := range jsonStruct.Fields { + if field.Optional.ToOptionalType() == OptionalTypeRequired { + var cond *jen.Statement + fieldName := field.camelCaseName() + if field.Type.String || field.Type.Integer || field.Type.Float { + cond = jen.Id("j").Dot(fieldName).Op("==").Lit(field.Type.ZeroValue()) + } else if field.Type.Boolean { + // do nothing + } else if field.Type.Array != nil { + cond = jen.Len(jen.Id("j").Dot(fieldName)).Op("==").Lit(0) + } else if field.Type.Struct != nil { + // do nothing + } else { + cond = jen.Id("j").Dot(fieldName).Op("==").Nil() + } + if cond != nil { + group.Add(jen.If(cond).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(fieldName)) + }), + )) + })) + } + if arrayField := field.Type.Array; arrayField != nil { + if arrayField.Type.Struct != nil { + group.Add( + jen.For(jen.List(jen.Id("_"), jen.Id("value")).Op(":=").Range().Id("j").Dot(fieldName)). 
+ BlockFunc(func(group *jen.Group) { + group.Add( + jen.If( + jen.Err().Op(":=").Id("value").Dot("validate").Call(), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Err()) + }), + ) + }), + ) + } + } else if field.Type.Struct != nil { + group.Add( + jen.If( + jen.Err().Op(":=").Id("j").Dot(fieldName).Dot("validate").Call(), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Err()) + }), + ) + } + } + } + group.Add(jen.Return(jen.Nil())) + })) + return nil +} + +func (jsonStruct *JsonStruct) getServiceBucketField() *JsonField { + var serviceBucketField *JsonField = nil + + for i := range jsonStruct.Fields { + if jsonStruct.Fields[i].ServiceBucket.ToServiceBucketType() != ServiceBucketTypeNone { + if serviceBucketField == nil { + serviceBucketField = &jsonStruct.Fields[i] + } else { + panic("multiple service bucket fields") + } + } + } + return serviceBucketField +} + +func (jsonType *JsonType) UnmarshalYAML(value *yaml.Node) error { + switch value.ShortTag() { + case "!!str": + switch value.Value { + case "string": + jsonType.String = true + case "integer": + jsonType.Integer = true + case "float": + jsonType.Float = true + case "boolean": + jsonType.Boolean = true + case "any": + jsonType.Any = true + case "string_map": + jsonType.StringMap = true + default: + return fmt.Errorf("unknown json type: %s", value.Value) + } + return nil + case "!!map": + switch value.Content[0].Value { + case "array": + return value.Content[1].Decode(&jsonType.Array) + case "struct": + return value.Content[1].Decode(&jsonType.Struct) + default: + return fmt.Errorf("unknown json type: %s", value.Content[0].Value) + } + default: + return fmt.Errorf("unknown json type: %s", value.ShortTag()) + } +} diff --git a/storagev2/internal/api-generator/main.go b/storagev2/internal/api-generator/main.go new file mode 100644 index 00000000..1ef17e4a --- /dev/null +++ b/storagev2/internal/api-generator/main.go @@ -0,0 +1,162 @@ 
+package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/dave/jennifer/jen" + "gopkg.in/yaml.v3" +) + +const ( + PackageNameHTTPClient = "github.com/qiniu/go-sdk/v7/storagev2/http_client" + PackageNameAuth = "github.com/qiniu/go-sdk/v7/auth" + PackageNameCredentials = "github.com/qiniu/go-sdk/v7/storagev2/credentials" + PackageNameRegion = "github.com/qiniu/go-sdk/v7/storagev2/region" + PackageNameUpToken = "github.com/qiniu/go-sdk/v7/storagev2/uptoken" + PackageNameErrors = "github.com/qiniu/go-sdk/v7/storagev2/errors" + PackageNameApis = "github.com/qiniu/go-sdk/v7/storagev2/apis" + PackageNameInternalIo = "github.com/qiniu/go-sdk/v7/internal/io" +) + +func main() { + rootProjectPath, err := os.Getwd() + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to get working directory: %s\n", err) + os.Exit(1) + } + + storageApiSpecsPath := filepath.Join(rootProjectPath, "..", "api-specs", "storage") + storageGeneratedDirPath := filepath.Join(rootProjectPath, "apis") + + os.RemoveAll(storageGeneratedDirPath) + + storageApiSpecEntries, err := ioutil.ReadDir(storageApiSpecsPath) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to read directory %s: %s\n", storageApiSpecsPath, err) + os.Exit(1) + } + + for _, storageApiSpecEntry := range storageApiSpecEntries { + apiSpecName := extractApiSpecName(storageApiSpecEntry.Name()) + apiSpecPath := filepath.Join(storageApiSpecsPath, storageApiSpecEntry.Name()) + generatedDirPath := filepath.Join(storageGeneratedDirPath, apiSpecName) + if err = os.MkdirAll(generatedDirPath, 0755); err != nil { + fmt.Fprintf(os.Stderr, "Failed to create directory %s: %s\n", generatedDirPath, err) + os.Exit(1) + } + if err = writeGolangPackages(apiSpecName, apiSpecPath, storageGeneratedDirPath); err != nil { + fmt.Fprintf(os.Stderr, "Failed to write go package %s: %s\n", apiSpecName, err) + os.Exit(1) + } + } + if err = writeApiClient(storageGeneratedDirPath); err != nil { + fmt.Fprintf(os.Stderr, "Failed 
to write api client: %s\n", err) + os.Exit(1) + } + if err = goBuildPackage("./apis/..."); err != nil { + fmt.Fprintf(os.Stderr, "Failed to build go package ./apis/...: %s\n", err) + os.Exit(1) + } +} + +func writeGolangPackages(apiSpecName, apiSpecPath, storageGeneratedDirPath string) (err error) { + generatedDirPath := filepath.Join(storageGeneratedDirPath, apiSpecName) + apiSpecFile, err := os.Open(apiSpecPath) + if err != nil { + return + } + defer apiSpecFile.Close() + + var apiSpec ApiDetailedDescription + decoder := yaml.NewDecoder(apiSpecFile) + decoder.KnownFields(true) + if err = decoder.Decode(&apiSpec); err != nil { + return + } + if err = apiSpecFile.Close(); err != nil { + return + } + + if err = writeSubPackage(apiSpecName, generatedDirPath, &apiSpec); err != nil { + return + } + return writeApiPackage(apiSpecName, storageGeneratedDirPath, &apiSpec) +} + +func writeSubPackage(apiSpecName, generatedDirPath string, apiSpec *ApiDetailedDescription) error { + packageFile := jen.NewFile(apiSpecName) + packageFile.HeaderComment("THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY!") + packageFile.PackageComment(apiSpec.Documentation) + if err := apiSpec.generateSubPackages(packageFile.Group, CodeGeneratorOptions{ + Name: apiSpecName, + CamelCaseName: apiSpec.CamelCaseName, + SnakeCaseName: apiSpec.SnakeCaseName, + Documentation: apiSpec.Documentation, + }); err != nil { + return err + } + return packageFile.Save(filepath.Join(generatedDirPath, "api.go")) +} + +func writeApiPackage(apiSpecName, storageGeneratedDirPath string, apiSpec *ApiDetailedDescription) error { + apisPackageFile := jen.NewFile("apis") + apisPackageFile.HeaderComment("THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY!") + if err := apiSpec.generatePackage(apisPackageFile.Group, CodeGeneratorOptions{ + Name: apiSpecName, + CamelCaseName: apiSpec.CamelCaseName, + SnakeCaseName: apiSpec.SnakeCaseName, + Documentation: apiSpec.Documentation, + }); err != nil { + return 
err + } + return apisPackageFile.Save(filepath.Join(storageGeneratedDirPath, "api_"+apiSpecName+".go")) +} + +func goBuildPackage(packagePath string) error { + cmd := exec.Command("go", "build", packagePath) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} + +func writeApiClient(storageGeneratedDirPath string) error { + apiPackageFile := jen.NewFile("apis") + apiPackageFile.HeaderComment("THIS FILE IS GENERATED BY api-generator, DO NOT EDIT DIRECTLY!") + generateApiClient(apiPackageFile.Group) + return apiPackageFile.Save(filepath.Join(storageGeneratedDirPath, "apis.go")) +} + +func generateApiClient(group *jen.Group) { + group.Add(jen.Comment("API 客户端")) + group.Add( + jen.Type().Id("Storage").StructFunc(func(group *jen.Group) { + group.Add(jen.Id("client").Op("*").Qual(PackageNameHTTPClient, "Client")) + }), + ) + group.Add(jen.Comment("创建 API 客户端")) + group.Add( + jen.Func(). + Id("NewStorage"). + Params(jen.Id("options").Op("*").Qual(PackageNameHTTPClient, "Options")). + Params(jen.Op("*").Id("Storage")). 
+ BlockFunc(func(group *jen.Group) { + group.Return(jen.Op("&").Id("Storage").ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("client").Op(":").Qual(PackageNameHTTPClient, "NewClient").Call(jen.Id("options"))) + })) + }), + ) + + group.Add(jen.Comment("API 客户端选项")) + group.Add( + jen.Type().Id("Options").StructFunc(func(group *jen.Group) { + group.Add(jen.Id("OverwrittenBucketHosts").Qual(PackageNameRegion, "EndpointsProvider")) + group.Add(jen.Id("OverwrittenBucketName").String()) + group.Add(jen.Id("OverwrittenEndpoints").Qual(PackageNameRegion, "EndpointsProvider")) + group.Add(jen.Id("OverwrittenRegion").Qual(PackageNameRegion, "RegionsProvider")) + }), + ) +} diff --git a/storagev2/internal/api-generator/multipart.go b/storagev2/internal/api-generator/multipart.go new file mode 100644 index 00000000..5f3a0dc6 --- /dev/null +++ b/storagev2/internal/api-generator/multipart.go @@ -0,0 +1,239 @@ +package main + +import ( + "fmt" + + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" +) + +type ( + MultipartFormFields struct { + Named []NamedMultipartFormField `yaml:"named_fields,omitempty"` + Free *FreeMultipartFormFields `yaml:"free_fields,omitempty"` + } + + NamedMultipartFormField struct { + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + Key string `yaml:"key,omitempty"` + Type *MultipartFormDataType `yaml:"type,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + ServiceBucket *ServiceBucketType `yaml:"service_bucket,omitempty"` + Optional *OptionalType `yaml:"optional,omitempty"` + } + + FreeMultipartFormFields struct { + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + } +) + +func (field 
*NamedMultipartFormField) camelCaseName() string { + if field.FieldCamelCaseName != "" { + return field.FieldCamelCaseName + } + return strcase.ToCamel(field.FieldName) +} + +func (field *FreeMultipartFormFields) camelCaseName() string { + if field.FieldCamelCaseName != "" { + return field.FieldCamelCaseName + } + return strcase.ToCamel(field.FieldName) +} + +func (mff *MultipartFormFields) addFields(group *jen.Group) error { + for _, named := range mff.Named { + fieldName := named.camelCaseName() + code, err := named.Type.AddTypeToStatement(jen.Id(fieldName), named.Optional.ToOptionalType() == OptionalTypeNullable) + if err != nil { + return err + } + group.Add(code) + } + if free := mff.Free; free != nil { + group.Add(jen.Id(free.camelCaseName()).Map(jen.String()).String()) + } + return nil +} + +func (mff *MultipartFormFields) addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + field := mff.getServiceBucketField() + + if field == nil || field.ServiceBucket.ToServiceBucketType() == ServiceBucketTypeNone { + return false, nil + } + + group.Add(jen.Func(). + Params(jen.Id("form").Op("*").Id(structName)). + Id("getBucketName"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.String(), jen.Error()). + BlockFunc(func(group *jen.Group) { + fieldName := field.camelCaseName() + switch field.ServiceBucket.ToServiceBucketType() { + case ServiceBucketTypePlainText: + group.Add(jen.Return(jen.Id("form").Dot(fieldName), jen.Nil())) + case ServiceBucketTypeEntry: + group.Add( + jen.Return( + jen.Qual("strings", "SplitN"). + Call(jen.Id("form").Dot(fieldName), jen.Lit(":"), jen.Lit(2)). + Index(jen.Lit(0)), + jen.Nil(), + ), + ) + case ServiceBucketTypeUploadToken: + group.Add( + jen.Id("putPolicy"). + Op(","). + Err(). + Op(":="). + Id("form"). + Dot(fieldName). + Dot("GetPutPolicy"). + Call(jen.Id("ctx")), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). 
+ BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Lit(""), jen.Err())) + }). + Else(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Id("putPolicy").Dot("GetBucketName").Call())) + }), + ) + default: + panic("unknown ServiceBucketType") + } + })) + return true, nil +} + +func (mff *MultipartFormFields) addBuildFunc(group *jen.Group, structName string) error { + var err error + group.Add( + jen.Func(). + Params(jen.Id("form").Op("*").Id(structName)). + Id("build"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.Op("*").Qual(PackageNameHTTPClient, "MultipartForm"), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add( + jen.Id("multipartForm"). + Op(":="). + New(jen.Qual(PackageNameHTTPClient, "MultipartForm")), + ) + for _, named := range mff.Named { + var zeroValue interface{} + if named.Optional.ToOptionalType() != OptionalTypeNullable { + zeroValue, err = named.Type.ZeroValue() + } + if err != nil { + return + } + fieldName := named.camelCaseName() + field := jen.Id("form").Dot(fieldName) + var cond *jen.Statement + if named.Type.ToMultipartFormDataType() == MultipartFormDataTypeBinaryData { + cond = field.Clone().Dot("Data").Op("!=").Nil() + } else if zeroValue == nil { + cond = field.Clone().Op("!=").Nil() + } else { + cond = field.Clone().Op("!=").Lit(zeroValue) + } + if named.Optional.ToOptionalType() == OptionalTypeNullable { + field = jen.Op("*").Add(field) + } + code := jen.If(cond).BlockFunc(func(group *jen.Group) { + switch named.Type.ToMultipartFormDataType() { + case MultipartFormDataTypeString: + group.Add(jen.Id("multipartForm").Dot("SetValue").Call( + jen.Lit(named.Key), + field, + )) + case MultipartFormDataTypeInteger: + group.Add(jen.Id("multipartForm").Dot("SetValue").Call( + jen.Lit(named.Key), + jen.Qual("strconv", "FormatInt").Call(field, jen.Lit(10)), + )) + case MultipartFormDataTypeUploadToken: + group.Add( + jen.Id("upToken"). + Op(","). + Err(). + Op(":="). + Add(field). 
+ Dot("GetUpToken"). + Call(jen.Id("ctx")), + ) + group.Add( + jen.If(jen.Err().Op("!=").Nil()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Nil(), jen.Err())) + }), + ) + group.Add(jen.Id("multipartForm").Dot("SetValue").Call( + jen.Lit(named.Key), + jen.Id("upToken"), + )) + case MultipartFormDataTypeBinaryData: + group.Add(jen.If(jen.Id("form").Dot(fieldName).Dot("Name").Op("==").Lit("").BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(fieldName + ".Name")) + }), + )) + }))) + group.Add(jen.Id("multipartForm").Dot("SetFile").Call( + jen.Lit(named.Key), + jen.Id("form").Dot(fieldName).Dot("Name"), + jen.Id("form").Dot(fieldName).Dot("Data"), + )) + } + }) + if named.Optional.ToOptionalType() == OptionalTypeRequired { + code = code.Else().BlockFunc(func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). 
+ ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(fieldName)) + }), + )) + }) + } + group.Add(code) + } + if free := mff.Free; free != nil { + group.Add(jen.For(jen.List(jen.Id("key"), jen.Id("value")).Op(":=").Range().Id("form").Dot(free.camelCaseName())).BlockFunc(func(group *jen.Group) { + group.Add(jen.Id("multipartForm").Dot("SetValue").Call(jen.Id("key"), jen.Id("value"))) + })) + } + group.Add(jen.Return(jen.Id("multipartForm"), jen.Nil())) + }), + ) + return err +} + +func (mff *MultipartFormFields) getServiceBucketField() *NamedMultipartFormField { + var serviceBucketField *NamedMultipartFormField + + for i := range mff.Named { + if mff.Named[i].ServiceBucket.ToServiceBucketType() != ServiceBucketTypeNone { + if serviceBucketField == nil { + serviceBucketField = &mff.Named[i] + } else { + panic(fmt.Sprintf("multiple service bucket fields: %s & %s", mff.Named[i].FieldName, serviceBucketField.FieldName)) + } + } + } + return serviceBucketField +} diff --git a/storagev2/internal/api-generator/path.go b/storagev2/internal/api-generator/path.go new file mode 100644 index 00000000..c153eb9f --- /dev/null +++ b/storagev2/internal/api-generator/path.go @@ -0,0 +1,284 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" +) + +type ( + PathParams struct { + Named []NamedPathParam `yaml:"named,omitempty"` + Free *FreePathParams `yaml:"free,omitempty"` + } + + NamedPathParam struct { + PathSegment string `yaml:"path_segment,omitempty"` + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + Type *StringLikeType `yaml:"type,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + Encode *EncodeType `yaml:"encode,omitempty"` + ServiceBucket *ServiceBucketType `yaml:"service_bucket,omitempty"` + ServiceObject *ServiceObjectType 
`yaml:"service_object,omitempty"` + Optional *OptionalType `yaml:"optional,omitempty"` + } + + FreePathParams struct { + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + EncodeParamKey *EncodeType `yaml:"encode_param_key"` + EncodeParamValue *EncodeType `yaml:"encode_param_value"` + } +) + +func (pp *NamedPathParam) camelCaseName() string { + if pp.FieldCamelCaseName != "" { + return pp.FieldCamelCaseName + } + return strcase.ToCamel(pp.FieldName) +} + +func (fpp *FreePathParams) camelCaseName() string { + if fpp.FieldCamelCaseName != "" { + return fpp.FieldCamelCaseName + } + return strcase.ToCamel(fpp.FieldName) +} + +func (pp *PathParams) addFields(group *jen.Group) error { + for _, namedPathParam := range pp.Named { + nilable := namedPathParam.Encode.ToEncodeType() == EncodeTypeUrlsafeBase64OrNone || namedPathParam.Optional.ToOptionalType() == OptionalTypeNullable + code, err := namedPathParam.Type.AddTypeToStatement(jen.Id(namedPathParam.camelCaseName()), nilable) + if err != nil { + return err + } + if namedPathParam.Documentation != "" { + code = code.Comment(namedPathParam.Documentation) + } + group.Add(code) + } + if free := pp.Free; free != nil { + freeFieldName := free.camelCaseName() + code := jen.Id(freeFieldName).Map(jen.String()).String() + if free.Documentation != "" { + code = code.Comment(free.Documentation) + } + group.Add(code) + } + return nil +} + +func (pp *PathParams) addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + field := pp.getServiceBucketField() + if field == nil || field.ServiceBucket.ToServiceBucketType() == ServiceBucketTypeNone { + return false, nil + } else if field.Type.ToStringLikeType() != StringLikeTypeString { + panic(fmt.Sprintf("service bucket field must be string: %s", field.FieldName)) + } + 
group.Add(jen.Func(). + Params(jen.Id("pp").Op("*").Id(structName)). + Id("getBucketName"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.String(), jen.Error()). + BlockFunc(func(group *jen.Group) { + fieldName := field.camelCaseName() + switch field.ServiceBucket.ToServiceBucketType() { + case ServiceBucketTypePlainText: + group.Add(jen.Return(jen.Id("pp").Dot(fieldName), jen.Nil())) + case ServiceBucketTypeEntry: + group.Add( + jen.Return( + jen.Qual("strings", "SplitN"). + Call(jen.Id("pp").Dot(fieldName), jen.Lit(":"), jen.Lit(2)). + Index(jen.Lit(0)), + jen.Nil(), + ), + ) + case ServiceBucketTypeUploadToken: + group.Add( + jen.If(jen.Id("putPolicy"), jen.Err()). + Op(":="). + Qual(PackageNameUpToken, "NewParser"). + Call(jen.Id("pp").Dot(fieldName)). + Dot("GetPutPolicy"). + Call(jen.Id("ctx")). + Op(";"). + Err(). + Op("!="). + Nil(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Lit(""), jen.Err())) + }). + Else(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Id("putPolicy").Dot("GetBucketName").Call())) + }), + ) + default: + panic("unknown ServiceBucketType") + } + })) + return true, nil +} + +func (pp *PathParams) addBuildFunc(group *jen.Group, structName string) error { + var err error + group.Add( + jen.Func(). + Params(jen.Id("path").Op("*").Id(structName)). + Id("buildPath"). + Params(). + Params(jen.Index().Add(jen.String()), jen.Error()). 
+ BlockFunc(func(group *jen.Group) { + group.Add(jen.Var().Id("allSegments").Index().Add(jen.String())) + for _, namedPathParam := range pp.Named { + var ( + code jen.Code + urlSafeBase64IsNone = namedPathParam.Encode.ToEncodeType() == EncodeTypeUrlsafeBase64OrNone + nilable = urlSafeBase64IsNone || namedPathParam.Optional.ToOptionalType() == OptionalTypeNullable + fieldName = namedPathParam.camelCaseName() + field = jen.Id("path").Dot(fieldName) + unreferencedField = field.Clone() + ) + if nilable { + unreferencedField = jen.Op("*").Add(unreferencedField) + } + switch namedPathParam.Type.ToStringLikeType() { + case StringLikeTypeString: + switch namedPathParam.Encode.ToEncodeType() { + case EncodeTypeNone: + code = unreferencedField.Clone() + case EncodeTypeUrlsafeBase64, EncodeTypeUrlsafeBase64OrNone: + code = jen.Qual("encoding/base64", "URLEncoding").Dot("EncodeToString").Call(jen.Index().Byte().Parens(unreferencedField.Clone())) + } + case StringLikeTypeInteger, StringLikeTypeFloat, StringLikeTypeBoolean: + code, _ = namedPathParam.Type.GenerateConvertCodeToString(unreferencedField.Clone()) + default: + err = errors.New("unknown type") + return + } + zeroValue, e := namedPathParam.Type.ZeroValue() + if e != nil { + err = e + return + } + condition := field.Clone() + if nilable { + condition = condition.Op("!=").Nil() + } else if v, ok := zeroValue.(bool); !ok || v { + condition = condition.Op("!=").Lit(zeroValue) + } + appendPathSegment := func(pathSegment string, value jen.Code) func(group *jen.Group) { + return func(group *jen.Group) { + codes := []jen.Code{jen.Id("allSegments")} + if pathSegment != "" { + codes = append(codes, jen.Lit(pathSegment)) + } + codes = append(codes, value) + group.Add( + jen.Id("allSegments").Op("=").Append(codes...), + ) + } + } + appendMissingRequiredFieldErrorFunc := func(fieldName string) func(group *jen.Group) { + return func(group *jen.Group) { + group.Add(jen.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, 
"MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(fieldName)) + }), + )) + } + } + + if urlSafeBase64IsNone { + group.Add( + jen.If(condition). + BlockFunc(appendPathSegment(namedPathParam.PathSegment, code)). + Else(). + BlockFunc(appendPathSegment(namedPathParam.PathSegment, jen.Lit("~"))), + ) + } else { + switch namedPathParam.Optional.ToOptionalType() { + case OptionalTypeRequired: + group.Add( + jen.If(condition). + BlockFunc(appendPathSegment(namedPathParam.PathSegment, code)). + Else(). + BlockFunc(appendMissingRequiredFieldErrorFunc(fieldName)), + ) + case OptionalTypeOmitEmpty, OptionalTypeNullable: + group.Add( + jen.If(condition). + BlockFunc(appendPathSegment(namedPathParam.PathSegment, code)), + ) + case OptionalTypeKeepEmpty: + appendPathSegment(namedPathParam.PathSegment, code)(group) + } + } + } + if free := pp.Free; free != nil { + freeFieldName := free.camelCaseName() + group.Add( + jen.For( + jen.Id("key"). + Op(","). + Id("value"). + Op(":="). + Range(). + Id("path"). + Dot(freeFieldName)). + BlockFunc(func(group *jen.Group) { + var keyCode, valueCode jen.Code + switch free.EncodeParamKey.ToEncodeType() { + case EncodeTypeNone: + keyCode = jen.Id("key") + case EncodeTypeUrlsafeBase64, EncodeTypeUrlsafeBase64OrNone: + keyCode = jen.Qual("encoding/base64", "URLEncoding"). + Dot("EncodeToString"). + Call(jen.Index().Byte().Parens(jen.Id("key"))) + } + group.Add( + jen.Id("allSegments").Op("=").Append(jen.Id("allSegments"), keyCode), + ) + switch free.EncodeParamValue.ToEncodeType() { + case EncodeTypeNone: + valueCode = jen.Id("value") + case EncodeTypeUrlsafeBase64, EncodeTypeUrlsafeBase64OrNone: + valueCode = jen.Qual("encoding/base64", "URLEncoding"). + Dot("EncodeToString"). 
+ Call(jen.Index().Byte().Parens(jen.Id("value"))) + } + group.Add( + jen.Id("allSegments").Op("=").Append(jen.Id("allSegments"), valueCode), + ) + }), + ) + } + group.Add(jen.Return(jen.Id("allSegments"), jen.Nil())) + }), + ) + return err +} + +func (pp *PathParams) getServiceBucketField() *NamedPathParam { + var serviceBucketField *NamedPathParam + + for i := range pp.Named { + if pp.Named[i].ServiceBucket.ToServiceBucketType() != ServiceBucketTypeNone { + if serviceBucketField == nil { + serviceBucketField = &pp.Named[i] + } else { + panic(fmt.Sprintf("multiple service bucket fields: %s & %s", pp.Named[i].FieldName, serviceBucketField.FieldName)) + } + } + } + return serviceBucketField +} diff --git a/storagev2/internal/api-generator/query.go b/storagev2/internal/api-generator/query.go new file mode 100644 index 00000000..8ef3a513 --- /dev/null +++ b/storagev2/internal/api-generator/query.go @@ -0,0 +1,195 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" + "github.com/iancoleman/strcase" +) + +type ( + QueryName struct { + FieldName string `yaml:"field_name,omitempty"` + FieldCamelCaseName string `yaml:"field_camel_case_name,omitempty"` + FieldSnakeCaseName string `yaml:"field_snake_case_name,omitempty"` + QueryName string `yaml:"query_name,omitempty"` + Documentation string `yaml:"documentation,omitempty"` + QueryType *StringLikeType `yaml:"query_type,omitempty"` + ServiceBucket *ServiceBucketType `yaml:"service_bucket,omitempty"` + Optional *OptionalType `yaml:"optional,omitempty"` + } + QueryNames []QueryName +) + +func (name *QueryName) camelCaseName() string { + if name.FieldCamelCaseName != "" { + return name.FieldCamelCaseName + } + return strcase.ToCamel(name.FieldName) +} + +func (names QueryNames) addFields(group *jen.Group) error { + for _, queryName := range names { + code, err := queryName.QueryType.AddTypeToStatement(jen.Id(queryName.camelCaseName()), false) + if err != nil { + return err + } + if 
queryName.Documentation != "" { + code = code.Comment(queryName.Documentation) + } + group.Add(code) + } + return nil +} + +func (names QueryNames) addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + field := names.getServiceBucketField() + if field == nil || field.ServiceBucket.ToServiceBucketType() == ServiceBucketTypeNone { + return false, nil + } else if field.QueryType.ToStringLikeType() != StringLikeTypeString { + panic(fmt.Sprintf("service bucket field must be string: %s", field.FieldName)) + } + group.Add(jen.Func(). + Params(jen.Id("query").Op("*").Id(structName)). + Id("getBucketName"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.String(), jen.Error()). + BlockFunc(func(group *jen.Group) { + fieldName := field.camelCaseName() + switch field.ServiceBucket.ToServiceBucketType() { + case ServiceBucketTypePlainText: + group.Add(jen.Return(jen.Id("query").Dot(fieldName), jen.Nil())) + case ServiceBucketTypeEntry: + group.Add( + jen.Return( + jen.Qual("strings", "SplitN"). + Call(jen.Id("query").Dot(fieldName), jen.Lit(":"), jen.Lit(2)). + Index(jen.Lit(0)), + jen.Nil(), + ), + ) + case ServiceBucketTypeUploadToken: + group.Add( + jen.If(jen.Id("putPolicy"), jen.Err()). + Op(":="). + Qual(PackageNameUpToken, "NewParser"). + Call(jen.Id("query").Dot(fieldName)). + Dot("GetPutPolicy"). + Call(jen.Id("ctx")). + Op(";"). + Err(). + Op("!="). + Nil(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Lit(""), jen.Err())) + }). + Else(). + BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Id("putPolicy").Dot("GetBucketName").Call())) + }), + ) + default: + panic("unknown ServiceBucketType") + } + })) + return true, nil +} + +func (names QueryNames) addBuildFunc(group *jen.Group, structName string) error { + var err error + group.Add( + jen.Func(). + Params(jen.Id("query").Op("*").Id(structName)). + Id("buildQuery"). + Params(). + Params(jen.Qual("net/url", "Values"), jen.Error()). 
+ BlockFunc(func(group *jen.Group) { + group.Add( + jen.Id("allQuery").Op(":=").Make(jen.Qual("net/url", "Values")), + ) + for _, queryName := range names { + if e := names.generateSetCall(group, queryName); e != nil { + err = e + return + } + } + group.Add(jen.Return(jen.Id("allQuery"), jen.Nil())) + }), + ) + return err +} + +func (names QueryNames) generateSetCall(group *jen.Group, queryName QueryName) error { + var ( + valueConvertCode *jen.Statement + err error + ) + fieldName := queryName.camelCaseName() + field := jen.Id("query").Dot(fieldName) + if queryName.Optional.ToOptionalType() == OptionalTypeNullable { + valueConvertCode, err = queryName.QueryType.GenerateConvertCodeToString(jen.Op("*").Add(field)) + } else { + valueConvertCode, err = queryName.QueryType.GenerateConvertCodeToString(field) + } + if err != nil { + return err + } + zeroValue, err := queryName.QueryType.ZeroValue() + if err != nil { + return err + } + + condition := field.Clone() + if queryName.Optional.ToOptionalType() == OptionalTypeNullable { + condition = condition.Op("!=").Nil() + } else if v, ok := zeroValue.(bool); !ok || v { + condition = condition.Op("!=").Lit(zeroValue) + } + setQueryFunc := func(queryName string, value jen.Code) func(group *jen.Group) { + return func(group *jen.Group) { + group.Id("allQuery").Dot("Set").Call(jen.Lit(queryName), value) + } + } + appendMissingRequiredFieldErrorFunc := func(fieldName string) func(group *jen.Group) { + return func(group *jen.Group) { + group.Return( + jen.Nil(), + jen.Qual(PackageNameErrors, "MissingRequiredFieldError"). + ValuesFunc(func(group *jen.Group) { + group.Add(jen.Id("Name").Op(":").Lit(fieldName)) + }), + ) + } + } + switch queryName.Optional.ToOptionalType() { + case OptionalTypeRequired: + group.Add(jen.If(condition). + BlockFunc(setQueryFunc(queryName.QueryName, valueConvertCode)). + Else(). 
+ BlockFunc(appendMissingRequiredFieldErrorFunc(fieldName))) + case OptionalTypeOmitEmpty, OptionalTypeNullable: + group.Add(jen.If(condition). + BlockFunc(setQueryFunc(queryName.QueryName, valueConvertCode))) + case OptionalTypeKeepEmpty: + setQueryFunc(queryName.QueryName, valueConvertCode)(group) + default: + return errors.New("unknown OptionalType") + } + return nil +} + +func (names QueryNames) getServiceBucketField() *QueryName { + var serviceBucketField *QueryName + + for i := range names { + if names[i].ServiceBucket.ToServiceBucketType() != ServiceBucketTypeNone { + if serviceBucketField == nil { + serviceBucketField = &names[i] + } else { + panic(fmt.Sprintf("multiple service bucket fields: %s & %s", names[i].FieldName, serviceBucketField.FieldName)) + } + } + } + return serviceBucketField +} diff --git a/storagev2/internal/api-generator/request.go b/storagev2/internal/api-generator/request.go new file mode 100644 index 00000000..09b81cfe --- /dev/null +++ b/storagev2/internal/api-generator/request.go @@ -0,0 +1,199 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" + "gopkg.in/yaml.v3" +) + +type ( + ApiRequestDescription struct { + PathParams *PathParams `yaml:"path_params,omitempty"` + HeaderNames HeaderNames `yaml:"header_names,omitempty"` + QueryNames QueryNames `yaml:"query_names,omitempty"` + Body *RequestBody `yaml:"body,omitempty"` + Authorization *Authorization `yaml:"authorization,omitempty"` + Idempotent *Idempotent `yaml:"idempotent,omitempty"` + responseTypeRequired bool + } + + RequestBody struct { + Json *JsonType + FormUrlencoded *FormUrlencodedRequestStruct + MultipartFormData *MultipartFormFields + BinaryData bool + } +) + +func (request *ApiRequestDescription) generate(group *jen.Group, opts CodeGeneratorOptions) (err error) { + if opts.Documentation != "" { + group.Add(jen.Comment(opts.Documentation)) + } + group.Add(jen.Type(). + Id(opts.camelCaseName()). 
+ StructFunc(func(group *jen.Group) { + err = request.addFields(group) + })) + if err != nil { + return + } + + if body := request.Body; body != nil { + if bodyJson := body.Json; bodyJson != nil { + err = bodyJson.generate(group, opts) + } + } + + return +} + +func (request *ApiRequestDescription) addFields(group *jen.Group) (err error) { + if pp := request.PathParams; pp != nil { + if err = pp.addFields(group); err != nil { + return + } + } + if names := request.QueryNames; names != nil { + if err = names.addFields(group); err != nil { + return + } + } + if names := request.HeaderNames; names != nil { + if err = names.addFields(group); err != nil { + return + } + } + if authorization := request.Authorization; authorization != nil { + switch authorization.ToAuthorization() { + case AuthorizationQbox, AuthorizationQiniu: + group.Add(jen.Id("Credentials").Qual(PackageNameCredentials, "CredentialsProvider").Comment("鉴权参数,用于生成鉴权凭证,如果为空,则使用 HTTPClientOptions 中的 CredentialsProvider")) + case AuthorizationUpToken: + group.Add(jen.Id("UpToken").Qual(PackageNameUpToken, "Provider").Comment("上传凭证,如果为空,则使用 HTTPClientOptions 中的 UpToken")) + } + } + if body := request.Body; body != nil { + if bodyJson := body.Json; bodyJson != nil { + if jsonStruct := bodyJson.Struct; jsonStruct != nil { + if err = jsonStruct.addFields(group, false); err != nil { + return + } + } else if jsonArray := bodyJson.Array; jsonArray != nil { + if err = jsonArray.addFields(group); err != nil { + return + } + } else { + return errors.New("request body should be struct or array") + } + } else if formUrlencoded := body.FormUrlencoded; formUrlencoded != nil { + if err = formUrlencoded.addFields(group); err != nil { + return + } + } else if multipartFormData := body.MultipartFormData; multipartFormData != nil { + if err = multipartFormData.addFields(group); err != nil { + return + } + } else if body.BinaryData { + group.Add(jen.Id("Body").Qual(PackageNameInternalIo, "ReadSeekCloser").Comment("请求体")) + } + } 
+ if request.responseTypeRequired { + group.Add(jen.Id("ResponseBody").Interface().Comment("响应体,如果为空,则 Response.Body 的类型由 encoding/json 库决定")) + } + return +} + +func (request *ApiRequestDescription) generateGetAccessKeyFunc(group *jen.Group, structName string) (err error) { + group.Add( + jen.Func(). + Params(jen.Id("request").Op("*").Id(structName)). + Id("getAccessKey"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.Id("string"), jen.Error()). + BlockFunc(func(group *jen.Group) { + if authorization := request.Authorization; authorization != nil { + switch authorization.ToAuthorization() { + case AuthorizationQbox, AuthorizationQiniu: + group.Add( + jen.If(jen.Id("request").Dot("Credentials").Op("!=").Nil()).BlockFunc(func(group *jen.Group) { + group.Add( + jen.If( + jen.List(jen.Id("credentials"), jen.Err()). + Op(":="). + Id("request"). + Dot("Credentials"). + Dot("Get"). + Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Lit(""), jen.Err()) + }).Else().BlockFunc(func(group *jen.Group) { + group.Return(jen.Id("credentials").Dot("AccessKey"), jen.Nil()) + }), + ) + }), + ) + case AuthorizationUpToken: + group.Add( + jen.If(jen.Id("request").Dot("UpToken").Op("!=").Nil()).BlockFunc(func(group *jen.Group) { + group.Return(jen.Id("request").Dot("UpToken").Dot("GetAccessKey").Call(jen.Id("ctx"))) + }), + ) + } + } + if body := request.Body; body != nil { + if multipartForm := body.MultipartFormData; multipartForm != nil { + if field := multipartForm.getServiceBucketField(); field != nil && field.ServiceBucket.ToServiceBucketType() != ServiceBucketTypeNone { + fieldName := field.camelCaseName() + group.Add( + jen.If(jen.Id("request").Dot(fieldName).Op("!=").Nil()).BlockFunc(func(group *jen.Group) { + group.If( + jen.List(jen.Id("accessKey"), jen.Err()). + Op(":="). + Id("request"). + Dot(fieldName). + Dot("GetAccessKey"). 
+ Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Lit(""), jen.Err())) + }).Else().BlockFunc(func(group *jen.Group) { + group.Add(jen.Return(jen.Id("accessKey"), jen.Nil())) + }) + }), + ) + } + } + } + group.Add(jen.Return(jen.Lit(""), jen.Nil())) + }), + ) + return +} + +func (body *RequestBody) UnmarshalYAML(value *yaml.Node) error { + switch value.ShortTag() { + case "!!str": + switch value.Value { + case "binary_data": + body.BinaryData = true + default: + return fmt.Errorf("unknown request body type: %s", value.Value) + } + return nil + case "!!map": + switch value.Content[0].Value { + case "json": + return value.Content[1].Decode(&body.Json) + case "form_urlencoded": + return value.Content[1].Decode(&body.FormUrlencoded) + case "multipart_form_data": + return value.Content[1].Decode(&body.MultipartFormData) + default: + return fmt.Errorf("unknown request body type: %s", value.Content[0].Value) + } + default: + return fmt.Errorf("unknown request body type: %s", value.ShortTag()) + } +} diff --git a/storagev2/internal/api-generator/response.go b/storagev2/internal/api-generator/response.go new file mode 100644 index 00000000..33c8fc94 --- /dev/null +++ b/storagev2/internal/api-generator/response.go @@ -0,0 +1,89 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" + "gopkg.in/yaml.v3" +) + +type ( + ApiResponseDescription struct { + Body *ResponseBody `yaml:"body,omitempty"` + } + + ResponseBody struct { + Json *JsonType + BinaryDataStream bool + } +) + +func (response *ApiResponseDescription) generate(group *jen.Group, opts CodeGeneratorOptions) (err error) { + if opts.Documentation != "" { + group.Add(jen.Comment(opts.Documentation)) + } + group.Add(jen.Type(). + Id(opts.camelCaseName()). 
+ StructFunc(func(group *jen.Group) { + err = response.addFields(group) + })) + if err != nil { + return + } + + if body := response.Body; body != nil { + if bodyJson := body.Json; bodyJson != nil { + err = bodyJson.generate(group, opts) + } + } + + return +} + +func (response *ApiResponseDescription) addFields(group *jen.Group) (err error) { + if body := response.Body; body != nil { + if bodyJson := body.Json; bodyJson != nil { + if jsonStruct := bodyJson.Struct; jsonStruct != nil { + if err = jsonStruct.addFields(group, false); err != nil { + return + } + } else if jsonArray := bodyJson.Array; jsonArray != nil { + if err = jsonArray.addFields(group); err != nil { + return + } + } else if bodyJson.Any { + group.Add(jen.Id("Body").Interface()) + } else if bodyJson.StringMap { + group.Add(jen.Id("Body").Map(jen.String()).String()) + } else { + return errors.New("response body should be struct or array") + } + } else if body.BinaryDataStream { + group.Add(jen.Id("Body").Qual("io", "ReadCloser")) + } + } + return +} + +func (body *ResponseBody) UnmarshalYAML(value *yaml.Node) error { + switch value.ShortTag() { + case "!!str": + switch value.Value { + case "binary_data_stream": + body.BinaryDataStream = true + return nil + default: + return fmt.Errorf("unknown response body type: %s", value.Value) + } + case "!!map": + switch value.Content[0].Value { + case "json": + return value.Content[1].Decode(&body.Json) + default: + return fmt.Errorf("unknown response body type: %s", value.Content[0].Value) + } + default: + return fmt.Errorf("unknown response body type: %s", value.ShortTag()) + } +} diff --git a/storagev2/internal/api-generator/types.go b/storagev2/internal/api-generator/types.go new file mode 100644 index 00000000..4e426331 --- /dev/null +++ b/storagev2/internal/api-generator/types.go @@ -0,0 +1,316 @@ +package main + +import ( + "errors" + "fmt" + + "github.com/dave/jennifer/jen" +) + +type ( + MethodName string + ServiceName string + StringLikeType string + 
MultipartFormDataType string + OptionalType string + Authorization string + Idempotent string + EncodeType string + ServiceBucketType string + ServiceObjectType string +) + +const ( + MethodNameGET MethodName = "GET" + MethodNamePOST MethodName = "POST" + MethodNamePUT MethodName = "PUT" + MethodNameDELETE MethodName = "DELETE" + + ServiceNameUp ServiceName = "up" + ServiceNameIo ServiceName = "io" + ServiceNameRs ServiceName = "rs" + ServiceNameRsf ServiceName = "rsf" + ServiceNameApi ServiceName = "api" + ServiceNameBucket ServiceName = "uc" + + StringLikeTypeString StringLikeType = "string" + StringLikeTypeInteger StringLikeType = "integer" + StringLikeTypeFloat StringLikeType = "float" + StringLikeTypeBoolean StringLikeType = "boolean" + + MultipartFormDataTypeString MultipartFormDataType = "string" + MultipartFormDataTypeInteger MultipartFormDataType = "integer" + MultipartFormDataTypeUploadToken MultipartFormDataType = "upload_token" + MultipartFormDataTypeBinaryData MultipartFormDataType = "binary_data" + + OptionalTypeRequired OptionalType = "" + OptionalTypeOmitEmpty OptionalType = "omitempty" + OptionalTypeKeepEmpty OptionalType = "keepempty" + OptionalTypeNullable OptionalType = "nullable" + + AuthorizationNone Authorization = "" + AuthorizationQbox Authorization = "Qbox" + AuthorizationQiniu Authorization = "Qiniu" + AuthorizationUpToken Authorization = "UploadToken" + + IdempotentAlways Idempotent = "always" + IdempotentDefault Idempotent = "default" + IdempotentNever Idempotent = "never" + + EncodeTypeNone EncodeType = "none" + EncodeTypeUrlsafeBase64 EncodeType = "url_safe_base64" + EncodeTypeUrlsafeBase64OrNone EncodeType = "url_safe_base64_or_none" + + ServiceBucketTypeNone = "" + ServiceBucketTypePlainText = "plain_text" + ServiceBucketTypeEntry = "entry" + ServiceBucketTypeUploadToken = "upload_token" +) + +func (s MethodName) ToString() (string, error) { + switch s { + case MethodNameGET, MethodNamePOST, MethodNamePUT, MethodNameDELETE: + return 
string(s), nil + case "get": + return string(MethodNameGET), nil + case "post": + return string(MethodNamePOST), nil + case "put": + return string(MethodNamePUT), nil + case "delete": + return string(MethodNameDELETE), nil + default: + return "", errors.New("unknown method") + } +} + +func (s ServiceName) ToServiceName() (*jen.Statement, error) { + switch s { + case ServiceNameUp: + return jen.Qual(PackageNameRegion, "ServiceUp"), nil + case ServiceNameIo: + return jen.Qual(PackageNameRegion, "ServiceIo"), nil + case ServiceNameRs: + return jen.Qual(PackageNameRegion, "ServiceRs"), nil + case ServiceNameRsf: + return jen.Qual(PackageNameRegion, "ServiceRsf"), nil + case ServiceNameApi: + return jen.Qual(PackageNameRegion, "ServiceApi"), nil + case ServiceNameBucket: + return jen.Qual(PackageNameRegion, "ServiceBucket"), nil + default: + return nil, errors.New("unknown type") + } +} + +func (t *StringLikeType) ToStringLikeType() StringLikeType { + if t == nil { + return StringLikeTypeString + } + switch *t { + case StringLikeTypeString, StringLikeTypeInteger, StringLikeTypeFloat, StringLikeTypeBoolean: + return *t + case "": + return StringLikeTypeString + default: + panic(fmt.Sprintf("unknown StringLikeType: %s", *t)) + } +} + +func (t *StringLikeType) AddTypeToStatement(statement *jen.Statement, nilable bool) (*jen.Statement, error) { + statement = statement.Clone() + if nilable { + statement = statement.Op("*") + } + switch t.ToStringLikeType() { + case StringLikeTypeString: + return statement.String(), nil + case StringLikeTypeInteger: + return statement.Int64(), nil + case StringLikeTypeFloat: + return statement.Float64(), nil + case StringLikeTypeBoolean: + return statement.Bool(), nil + default: + return nil, errors.New("unknown type") + } +} + +func (t *StringLikeType) GenerateConvertCodeToString(id *jen.Statement) (*jen.Statement, error) { + switch t.ToStringLikeType() { + case StringLikeTypeString: + return id.Clone(), nil + case StringLikeTypeInteger: + 
return jen.Qual("strconv", "FormatInt").Call(id.Clone(), jen.Lit(10)), nil + case StringLikeTypeFloat: + return jen.Qual("strconv", "FormatFloat").Call(id.Clone(), jen.LitByte('g'), jen.Lit(-1), jen.Lit(64)), nil + case StringLikeTypeBoolean: + return jen.Qual("strconv", "FormatBool").Call(id.Clone()), nil + default: + return nil, errors.New("unknown type") + } +} + +func (t *StringLikeType) ZeroValue() (interface{}, error) { + switch t.ToStringLikeType() { + case StringLikeTypeString: + return "", nil + case StringLikeTypeInteger: + return 0, nil + case StringLikeTypeFloat: + return 0.0, nil + case StringLikeTypeBoolean: + return false, nil + default: + return nil, errors.New("unknown type") + } +} + +func (t *MultipartFormDataType) ToMultipartFormDataType() MultipartFormDataType { + if t == nil { + return MultipartFormDataTypeString + } + switch *t { + case MultipartFormDataTypeString, MultipartFormDataTypeInteger, MultipartFormDataTypeUploadToken, MultipartFormDataTypeBinaryData: + return *t + case "": + return MultipartFormDataTypeString + default: + panic(fmt.Sprintf("unknown MultipartFormDataType: %s", *t)) + } +} + +func (t *MultipartFormDataType) ZeroValue() (interface{}, error) { + switch t.ToMultipartFormDataType() { + case MultipartFormDataTypeString: + return "", nil + case MultipartFormDataTypeInteger: + return 0, nil + case MultipartFormDataTypeUploadToken: + return nil, nil + case MultipartFormDataTypeBinaryData: + return nil, nil + default: + return nil, errors.New("unknown type") + } +} + +func (t *MultipartFormDataType) AddTypeToStatement(statement *jen.Statement, nilable bool) (*jen.Statement, error) { + statement = statement.Clone() + switch t.ToMultipartFormDataType() { + case MultipartFormDataTypeString: + if nilable { + statement = statement.Op("*") + } + return statement.String(), nil + case MultipartFormDataTypeInteger: + if nilable { + statement = statement.Op("*") + } + return statement.Int64(), nil + case MultipartFormDataTypeUploadToken: + 
return statement.Qual(PackageNameUpToken, "Provider"), nil + case MultipartFormDataTypeBinaryData: + if nilable { + statement = statement.Op("*") + } + return statement.Qual(PackageNameHTTPClient, "MultipartFormBinaryData"), nil + default: + return nil, errors.New("unknown type") + } +} + +func (t *OptionalType) ToOptionalType() OptionalType { + if t == nil { + return OptionalTypeRequired + } + switch *t { + case OptionalTypeRequired, OptionalTypeOmitEmpty, OptionalTypeKeepEmpty, OptionalTypeNullable: + return *t + default: + panic(fmt.Sprintf("unknown OptionalType: %s", *t)) + } +} + +func (t *Authorization) ToAuthorization() Authorization { + if t == nil { + return AuthorizationNone + } + switch *t { + case AuthorizationNone, AuthorizationQbox, AuthorizationQiniu, AuthorizationUpToken: + return *t + case "qbox": + return AuthorizationQbox + case "qiniu": + return AuthorizationQiniu + case "upload_token": + return AuthorizationUpToken + default: + panic(fmt.Sprintf("unknown Authorization: %s", *t)) + } +} + +func (t *Idempotent) ToIdempotent() Idempotent { + if t == nil { + return IdempotentDefault + } + switch *t { + case IdempotentAlways, IdempotentDefault, IdempotentNever: + return *t + case "": + return IdempotentDefault + default: + panic(fmt.Sprintf("unknown Idempotent: %s", *t)) + } +} + +func (t *EncodeType) ToEncodeType() EncodeType { + if t == nil { + return EncodeTypeNone + } + switch *t { + case EncodeTypeNone, EncodeTypeUrlsafeBase64, EncodeTypeUrlsafeBase64OrNone: + return *t + case "": + return EncodeTypeNone + default: + panic(fmt.Sprintf("unknown EncodeType: %s", *t)) + } +} + +func (t *ServiceBucketType) ToServiceBucketType() ServiceBucketType { + if t == nil { + return ServiceBucketTypeNone + } + switch *t { + case ServiceBucketTypeNone, ServiceBucketTypePlainText, ServiceBucketTypeEntry, ServiceBucketTypeUploadToken: + return *t + default: + panic(fmt.Sprintf("unknown ServiceBucketType: %s", *t)) + } +} + +func (authorization Authorization) 
addGetBucketNameFunc(group *jen.Group, structName string) (bool, error) { + if authorization.ToAuthorization() == AuthorizationUpToken { + group.Add(jen.Func(). + Params(jen.Id("request").Op("*").Id(structName)). + Id("getBucketName"). + Params(jen.Id("ctx").Qual("context", "Context")). + Params(jen.String(), jen.Error()). + BlockFunc(func(group *jen.Group) { + group.Add(jen.If(jen.Id("request").Dot("UpToken").Op("!=").Nil()).BlockFunc(func(group *jen.Group) { + group.If( + jen.List(jen.Id("putPolicy"), jen.Err()).Op(":=").Id("request").Dot("UpToken").Dot("GetPutPolicy").Call(jen.Id("ctx")), + jen.Err().Op("!=").Nil(), + ).BlockFunc(func(group *jen.Group) { + group.Return(jen.Lit(""), jen.Err()) + }).Else().BlockFunc(func(group *jen.Group) { + group.Return(jen.Id("putPolicy").Dot("GetBucketName").Call()) + }) + })) + group.Add(jen.Return(jen.Lit(""), jen.Nil())) + })) + return true, nil + } + return false, nil +} diff --git a/storagev2/internal/api-generator/utils.go b/storagev2/internal/api-generator/utils.go new file mode 100644 index 00000000..42d95349 --- /dev/null +++ b/storagev2/internal/api-generator/utils.go @@ -0,0 +1,14 @@ +package main + +import ( + "path/filepath" + "strings" +) + +func extractApiSpecName(name string) string { + baseName := filepath.Base(name) + if index := strings.Index(baseName, "."); index >= 0 { + baseName = baseName[:index] + } + return baseName +} diff --git a/storagev2/region/query.go b/storagev2/region/query.go new file mode 100644 index 00000000..4006d072 --- /dev/null +++ b/storagev2/region/query.go @@ -0,0 +1,295 @@ +package region + +import ( + "context" + "crypto/md5" + "fmt" + "hash/crc64" + "math" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/qiniu/go-sdk/v7/internal/cache" + "github.com/qiniu/go-sdk/v7/internal/clientv2" + "github.com/qiniu/go-sdk/v7/internal/hostprovider" + "github.com/qiniu/go-sdk/v7/internal/log" +) + +type ( + // BucketRegionsQuery 空间区域查询器 + 
BucketRegionsQuery interface { + Query(accessKey, bucketName string) RegionsProvider + } + + bucketRegionsQuery struct { + bucketHosts Endpoints + cache *cache.Cache + client clientv2.Client + useHttps bool + } + + // BucketRegionsQueryOptions 空间区域查询器选项 + BucketRegionsQueryOptions struct { + // 使用 HTTP 协议 + UseInsecureProtocol bool + + // 压缩周期(默认:60s) + CompactInterval time.Duration + + // 持久化路径(默认:$TMPDIR/qiniu-golang-sdk/query_v4_01.cache.json) + PersistentFilePath string + + // 持久化周期(默认:60s) + PersistentDuration time.Duration + + // 单域名重试次数(默认:2) + RetryMax int + + // 主备域名冻结时间(默认:600s),当一个域名请求失败(单个域名会被重试 RetryMax 次),会被冻结一段时间,使用备用域名进行重试,在冻结时间内,域名不能被使用,当一个操作中所有域名均被冻结则不再进行重试,返回最后一次操作的错误。 + HostFreezeDuration time.Duration + + // HTTP 客户端,如果不配置则使用默认的 HTTP 客户端 + Client clientv2.Client + } + + bucketRegionsProvider struct { + accessKey string + bucketName string + cacheKey string + query *bucketRegionsQuery + } + + v4QueryCacheValue struct { + Regions []*Region `json:"regions"` + ExpiredAt time.Time `json:"expired_at"` + } + + v4QueryServiceHosts struct { + Domains []string `json:"domains"` + Old []string `json:"old"` + } + + v4QueryRegion struct { + RegionId string `json:"region"` + Ttl int64 `json:"ttl"` + Io v4QueryServiceHosts `json:"io"` + IoSrc v4QueryServiceHosts `json:"io_src"` + Up v4QueryServiceHosts `json:"up"` + Rs v4QueryServiceHosts `json:"rs"` + Rsf v4QueryServiceHosts `json:"rsf"` + Api v4QueryServiceHosts `json:"api"` + Uc v4QueryServiceHosts `json:"uc"` + } + + v4QueryResponse struct { + Hosts []v4QueryRegion `json:"hosts"` + } +) + +const cacheFileName = "query_v4_01.cache.json" + +var ( + persistentCaches map[uint64]*cache.Cache + persistentCachesLock sync.Mutex +) + +// NewBucketRegionsQuery 创建空间区域查询器 +func NewBucketRegionsQuery(bucketHosts Endpoints, opts *BucketRegionsQueryOptions) (BucketRegionsQuery, error) { + if opts == nil { + opts = &BucketRegionsQueryOptions{} + } + if opts.RetryMax <= 0 { + opts.RetryMax = 2 + } + if opts.CompactInterval == 
time.Duration(0) { + opts.CompactInterval = time.Minute + } + if opts.PersistentFilePath == "" { + opts.PersistentFilePath = filepath.Join(os.TempDir(), "qiniu-golang-sdk", cacheFileName) + } + if opts.PersistentDuration == time.Duration(0) { + opts.PersistentDuration = time.Minute + } + + persistentCache, err := getPersistentCache(opts) + if err != nil { + return nil, err + } + return &bucketRegionsQuery{ + bucketHosts: bucketHosts, + cache: persistentCache, + client: makeBucketQueryClient(opts.Client, bucketHosts, !opts.UseInsecureProtocol, opts.RetryMax, opts.HostFreezeDuration), + useHttps: !opts.UseInsecureProtocol, + }, nil +} + +func getPersistentCache(opts *BucketRegionsQueryOptions) (*cache.Cache, error) { + var ( + persistentCache *cache.Cache + ok bool + err error + ) + + crc64Value := calcPersistentCacheCrc64(opts) + persistentCachesLock.Lock() + defer persistentCachesLock.Unlock() + + if persistentCaches == nil { + persistentCaches = make(map[uint64]*cache.Cache) + } + if persistentCache, ok = persistentCaches[crc64Value]; !ok { + persistentCache, err = cache.NewPersistentCache( + reflect.TypeOf(&v4QueryCacheValue{}), + opts.PersistentFilePath, + opts.CompactInterval, + opts.PersistentDuration, + func(err error) { + log.Warn(fmt.Sprintf("BucketRegionsQuery persist error: %s", err)) + }) + if err != nil { + return nil, err + } + persistentCaches[crc64Value] = persistentCache + } + return persistentCache, nil +} + +// Query 查询空间区域,返回 region.RegionsProvider +func (query *bucketRegionsQuery) Query(accessKey, bucketName string) RegionsProvider { + return &bucketRegionsProvider{ + accessKey: accessKey, + bucketName: bucketName, + query: query, + cacheKey: makeRegionCacheKey(accessKey, bucketName, query.bucketHosts), + } +} + +func (provider *bucketRegionsProvider) GetRegions(ctx context.Context) ([]*Region, error) { + var err error + cacheValue, status := provider.query.cache.Get(provider.cacheKey, func() (cache.CacheValue, error) { + var ret v4QueryResponse 
+ url := fmt.Sprintf("%s/v4/query?ak=%s&bucket=%s", provider.query.bucketHosts.firstUrl(provider.query.useHttps), provider.accessKey, provider.bucketName) + if err = clientv2.DoAndDecodeJsonResponse(provider.query.client, clientv2.RequestParams{ + Context: ctx, + Method: clientv2.RequestMethodGet, + Url: url, + }, &ret); err != nil { + return nil, err + } + return ret.toCacheValue(), nil + }) + if status == cache.NoResultGot { + return nil, err + } + return cacheValue.(*v4QueryCacheValue).Regions, nil +} + +func (left *v4QueryCacheValue) IsEqual(rightValue cache.CacheValue) bool { + if right, ok := rightValue.(*v4QueryCacheValue); ok { + if len(left.Regions) != len(right.Regions) { + return false + } + for idx := range left.Regions { + if !left.Regions[idx].IsEqual(right.Regions[idx]) { + return false + } + } + return true + } + return false +} + +func (left *v4QueryCacheValue) IsValid() bool { + return time.Now().Before(left.ExpiredAt) +} + +func (response *v4QueryResponse) toCacheValue() *v4QueryCacheValue { + var ( + minTtl = int64(math.MaxInt64) + regions = make([]*Region, 0, len(response.Hosts)) + ) + for _, host := range response.Hosts { + regions = append(regions, host.toCacheValue()) + if host.Ttl < minTtl { + minTtl = host.Ttl + } + } + return &v4QueryCacheValue{ + Regions: regions, + ExpiredAt: time.Now().Add(time.Duration(minTtl) * time.Second), + } +} + +func (response *v4QueryRegion) toCacheValue() *Region { + return &Region{ + RegionID: response.RegionId, + Up: response.Up.toCacheValue(), + Io: response.Io.toCacheValue(), + IoSrc: response.IoSrc.toCacheValue(), + Rs: response.Rs.toCacheValue(), + Rsf: response.Rsf.toCacheValue(), + Api: response.Api.toCacheValue(), + Bucket: response.Uc.toCacheValue(), + } +} + +func (response *v4QueryServiceHosts) toCacheValue() Endpoints { + return Endpoints{ + Preferred: response.Domains, + Alternative: response.Old, + } +} + +func makeRegionCacheKey(accessKey, bucketName string, bucketHosts Endpoints) string { + 
return fmt.Sprintf("%s:%s:%s", accessKey, bucketName, makeBucketHostsCacheKey(bucketHosts)) +} + +func makeBucketHostsCacheKey(serviceHosts Endpoints) string { + return fmt.Sprintf("%s:%s", makeHostsCacheKey(serviceHosts.Preferred), makeHostsCacheKey(serviceHosts.Alternative)) +} + +func makeHostsCacheKey(hosts []string) string { + sortedHosts := append(make([]string, 0, len(hosts)), hosts...) + sort.StringSlice(sortedHosts).Sort() + return fmt.Sprintf("%x", md5.Sum([]byte(strings.Join(sortedHosts, ",")))) +} + +func makeBucketQueryClient(client clientv2.Client, bucketHosts Endpoints, useHttps bool, retryMax int, hostFreezeDuration time.Duration) clientv2.Client { + is := []clientv2.Interceptor{ + clientv2.NewHostsRetryInterceptor(clientv2.HostsRetryConfig{ + RetryConfig: clientv2.RetryConfig{ + RetryMax: len(bucketHosts.Preferred) + len(bucketHosts.Alternative), + RetryInterval: nil, + ShouldRetry: nil, + }, + ShouldFreezeHost: nil, + HostFreezeDuration: hostFreezeDuration, + HostProvider: hostprovider.NewWithHosts(bucketHosts.allUrls(useHttps)), + }), + clientv2.NewSimpleRetryInterceptor(clientv2.RetryConfig{ + RetryMax: retryMax, + RetryInterval: nil, + ShouldRetry: nil, + }), + } + return clientv2.NewClient(client, is...) +} + +func (opts *BucketRegionsQueryOptions) toBytes() []byte { + bytes := make([]byte, 0, 1024) + bytes = strconv.AppendInt(bytes, int64(opts.CompactInterval), 36) + bytes = append(bytes, []byte(opts.PersistentFilePath)...) 
+ bytes = append(bytes, byte(0)) + bytes = strconv.AppendInt(bytes, int64(opts.PersistentDuration), 36) + return bytes +} + +func calcPersistentCacheCrc64(opts *BucketRegionsQueryOptions) uint64 { + return crc64.Checksum(opts.toBytes(), crc64.MakeTable(crc64.ISO)) +} diff --git a/storagev2/region/query_test.go b/storagev2/region/query_test.go new file mode 100644 index 00000000..0c4abb21 --- /dev/null +++ b/storagev2/region/query_test.go @@ -0,0 +1,137 @@ +//go:build unit +// +build unit + +package region + +import ( + "context" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "reflect" + "sync/atomic" + "testing" +) + +func TestBucketRegionsQuery(t *testing.T) { + const accessKey = "fakeaccesskey" + const bucketName = "fakeBucketName" + var callCount uint64 + mux := http.NewServeMux() + mux.HandleFunc("/v4/query", func(w http.ResponseWriter, r *http.Request) { + if gotAk := r.URL.Query().Get("ak"); gotAk != accessKey { + t.Fatalf("Unexpected ak: %s", gotAk) + } + if gotBucketName := r.URL.Query().Get("bucket"); gotBucketName != bucketName { + t.Fatalf("Unexpected bucket: %s", gotBucketName) + } + if _, err := io.WriteString(w, mockUcQueryResponseBody()); err != nil { + t.Fatal(err) + } + atomic.AddUint64(&callCount, 1) + }) + server := httptest.NewServer(mux) + defer server.Close() + + cacheFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatal(err) + } + defer os.Remove(cacheFile.Name()) + defer cacheFile.Close() + + query, err := NewBucketRegionsQuery(Endpoints{Preferred: []string{server.URL}}, &BucketRegionsQueryOptions{ + PersistentFilePath: cacheFile.Name(), + }) + if err != nil { + t.Fatal(err) + } + for i := 0; i < 2; i++ { + regions, err := query.Query(accessKey, bucketName).GetRegions(context.Background()) + if err != nil { + t.Fatal(err) + } + if regionsCount := len(regions); regionsCount != 2 { + t.Fatalf("Unexpected regions count: %d", regionsCount) + } + if regionId := regions[0].RegionID; regionId != "z0" { + 
t.Fatalf("Unexpected regionId: %s", regionId) + } + if preferredUp := regions[0].Up.Preferred; !reflect.DeepEqual(preferredUp, []string{"upload.qiniup.com", "up.qiniup.com"}) { + t.Fatalf("Unexpected preferred up domains: %v", preferredUp) + } + if alternativeUp := regions[0].Up.Alternative; !reflect.DeepEqual(alternativeUp, []string{"upload.qbox.me", "up.qbox.me"}) { + t.Fatalf("Unexpected alternative up domains: %v", alternativeUp) + } + if regionId := regions[1].RegionID; regionId != "z1" { + t.Fatalf("Unexpected regionId: %s", regionId) + } + if preferredUp := regions[1].Up.Preferred; !reflect.DeepEqual(preferredUp, []string{"upload-z1.qiniup.com", "up-z1.qiniup.com"}) { + t.Fatalf("Unexpected preferred up domains: %v", preferredUp) + } + if alternativeUp := regions[1].Up.Alternative; !reflect.DeepEqual(alternativeUp, []string{"upload-z1.qbox.me", "up-z1.qbox.me"}) { + t.Fatalf("Unexpected alternative up domains: %v", alternativeUp) + } + } + if cc := atomic.LoadUint64(&callCount); cc != 1 { + t.Fatalf("Unexpected call count: %d", cc) + } +} + +func mockUcQueryResponseBody() string { + return ` + { + "hosts": [ + { + "region": "z0", + "ttl": 86400, + "io": { + "domains": ["iovip.qbox.me"] + }, + "up": { + "domains": ["upload.qiniup.com", "up.qiniup.com"], + "old": ["upload.qbox.me", "up.qbox.me"] + }, + "uc": { + "domains": ["uc.qbox.me"] + }, + "rs": { + "domains": ["rs-z0.qbox.me"] + }, + "rsf": { + "domains": ["rsf-z0.qbox.me"] + }, + "api": { + "domains": ["api.qiniu.com"] + } + }, + { + "region": "z1", + "ttl": 86400, + "io": { + "domains": ["iovip-z1.qbox.me"] + }, + "up": { + "domains": ["upload-z1.qiniup.com", "up-z1.qiniup.com"], + "old": ["upload-z1.qbox.me", "up-z1.qbox.me"] + }, + "uc": { + "domains": ["uc.qbox.me"] + }, + "rs": { + "domains": ["rs-z1.qbox.me"] + }, + "rsf": { + "domains": ["rsf-z1.qbox.me"] + }, + "api": { + "domains": ["api-z1.qiniu.com"] + } + } + ], + "ttl": 86400 + } + ` +} diff --git a/storagev2/region/region.go 
b/storagev2/region/region.go new file mode 100644 index 00000000..62adae87 --- /dev/null +++ b/storagev2/region/region.go @@ -0,0 +1,308 @@ +package region + +import ( + "context" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "github.com/qiniu/go-sdk/v7/internal/freezer" + "github.com/qiniu/go-sdk/v7/internal/hostprovider" +) + +type ( + // 服务地址 + // + // 可以存储域名或 IP,端口和协议可选 + Endpoints struct { + Preferred []string `json:"preferred,omitempty"` + Alternative []string `json:"alternative,omitempty"` + } + + // 区域信息 + // + // 可能有多个机房信息,每个机房可能有多个服务地址 + // + // 如果使用公有云,建议使用 GetRegionByID 方法直接获取区域实例,不建议手动设置服务地址 + Region struct { + RegionID string `json:"region_id,omitempty"` // 区域 ID + Up Endpoints `json:"up,omitempty"` // Up 服务域名 + Io Endpoints `json:"io,omitempty"` // Io 服务域名 + IoSrc Endpoints `json:"io_src,omitempty"` // IoSrc 服务域名 + Rs Endpoints `json:"rs,omitempty"` // Rs 服务域名 + Rsf Endpoints `json:"rsf,omitempty"` // Rsf 服务域名 + Api Endpoints `json:"api,omitempty"` // Api 服务域名 + Bucket Endpoints `json:"bucket,omitempty"` // Bucket 服务域名 + } + + // 区域提供者 + RegionsProvider interface { + GetRegions(context.Context) ([]*Region, error) + } + + // 服务名称 + ServiceName string + + // 服务地址迭代器 + EndpointsIter struct { + endpoints Endpoints + index int + isAlternative bool + } + + // 服务地址提供者 + EndpointsProvider interface { + GetEndpoints(context.Context) (Endpoints, error) + } + + endpointsHostProvider struct { + iter *EndpointsIter + freezer freezer.Freezer + lastFreezeErr error + } +) + +const ( + // Up 服务 + ServiceUp ServiceName = "up" + // Io 服务 + ServiceIo ServiceName = "io" + // IoSrc 服务 + ServiceIoSrc ServiceName = "io_src" + // Rs 服务 + ServiceRs ServiceName = "rs" + // Rsf 服务 + ServiceRsf ServiceName = "rsf" + // Api 服务 + ServiceApi ServiceName = "api" + // Bucket 服务 + ServiceBucket ServiceName = "bucket" +) + +var ( + ErrUnrecognizedServiceName = errors.New("unrecognized service name") +) + +// 根据 RegionID 获取公有云区域信息 +func GetRegionByID(regionID string, 
useHttps bool) *Region { + region := &Region{RegionID: regionID} + if regionID == "z0" { + region.Up.Preferred = []string{makeHost("upload.qiniup.com", useHttps), makeHost("upload-z0.qiniup.com", useHttps), makeHost("up.qiniup.com", useHttps), makeHost("up-z0.qiniup.com", useHttps)} + region.Up.Alternative = []string{makeHost("up.qbox.me", useHttps), makeHost("up-z0.qbox.me", useHttps)} + region.Io.Preferred = []string{makeHost("iovip.qiniuio.com", useHttps), makeHost("iovip-z0.qiniuio.com", useHttps)} + region.Io.Alternative = []string{makeHost("iovip.qbox.me", useHttps), makeHost("iovip-z0.qbox.me", useHttps)} + } else { + region.Up.Preferred = []string{makeHost(fmt.Sprintf("upload-%s.qiniup.com", regionID), useHttps), makeHost(fmt.Sprintf("up-%s.qiniup.com", regionID), useHttps)} + region.Io.Preferred = []string{makeHost(fmt.Sprintf("iovip-%s.qiniuio.com", regionID), useHttps)} + } + region.Rs.Preferred = []string{makeHost(fmt.Sprintf("rs-%s.qiniuapi.com", regionID), useHttps)} + region.Rsf.Preferred = []string{makeHost(fmt.Sprintf("rsf-%s.qiniuapi.com", regionID), useHttps)} + region.Api.Preferred = []string{makeHost(fmt.Sprintf("api-%s.qiniuapi.com", regionID), useHttps)} + region.Bucket.Preferred = []string{makeHost("uc.qiniuapi.com", useHttps), makeHost("kodo-config.qiniuapi.com", useHttps)} + region.Bucket.Alternative = []string{makeHost("uc.qbox.me", useHttps)} + return region +} + +func makeHost(domain string, useHttps bool) string { + if useHttps { + return "https://" + domain + } else { + return "http://" + domain + } +} + +func (region *Region) GetRegions(context.Context) ([]*Region, error) { + return []*Region{region}, nil +} + +func (region *Region) Endpoints(serviceNames []ServiceName) (Endpoints, error) { + var endpoint Endpoints + for _, serviceName := range serviceNames { + switch serviceName { + case ServiceUp: + endpoint = endpoint.Join(region.Up) + case ServiceIo: + endpoint = endpoint.Join(region.Io) + case ServiceIoSrc: + endpoint = 
endpoint.Join(region.IoSrc) + case ServiceRs: + endpoint = endpoint.Join(region.Rs) + case ServiceRsf: + endpoint = endpoint.Join(region.Rsf) + case ServiceApi: + endpoint = endpoint.Join(region.Api) + case ServiceBucket: + endpoint = endpoint.Join(region.Bucket) + default: + return endpoint, ErrUnrecognizedServiceName + } + } + return endpoint, nil +} + +func (region *Region) EndpointsIter(serviceNames []ServiceName) (*EndpointsIter, error) { + endpoints, err := region.Endpoints(serviceNames) + if err != nil { + return nil, err + } + return endpoints.Iter(), nil +} + +func (left *Region) IsEqual(right *Region) bool { + return left.RegionID == right.RegionID && + left.Up.IsEqual(right.Up) && + left.Io.IsEqual(right.Io) && + left.IoSrc.IsEqual(right.IoSrc) && + left.Rs.IsEqual(right.Rs) && + left.Rsf.IsEqual(right.Rsf) && + left.Api.IsEqual(right.Api) && + left.Bucket.IsEqual(right.Bucket) +} + +func (left Endpoints) Join(rights ...Endpoints) Endpoints { + newEndpoint := left + for _, right := range rights { + if len(newEndpoint.Preferred) == 0 { + newEndpoint.Preferred = right.Preferred + } else { + newEndpoint.Preferred = append(newEndpoint.Preferred, right.Preferred...) + } + if len(newEndpoint.Alternative) == 0 { + newEndpoint.Alternative = right.Alternative + } else { + newEndpoint.Alternative = append(newEndpoint.Alternative, right.Alternative...) 
+ } + } + + return newEndpoint +} + +func (left Endpoints) IsEqual(right Endpoints) bool { + return reflect.DeepEqual(left.Preferred, right.Preferred) && + reflect.DeepEqual(left.Alternative, right.Alternative) +} + +func (hosts Endpoints) Iter() *EndpointsIter { + return &EndpointsIter{endpoints: hosts} +} + +func (endpoints Endpoints) IsEmpty() bool { + return len(endpoints.Preferred) == 0 && len(endpoints.Alternative) == 0 +} + +func (endpoints Endpoints) firstUrl(useHttps bool) string { + for _, preferred := range endpoints.Preferred { + return makeUrlFromHost(preferred, useHttps) + } + for _, alternative := range endpoints.Alternative { + return makeUrlFromHost(alternative, useHttps) + } + return "" +} + +func (endpoints Endpoints) GetEndpoints(context.Context) (Endpoints, error) { + return endpoints, nil +} + +func (endpoints Endpoints) allUrls(useHttps bool) []string { + allHosts := make([]string, 0, len(endpoints.Preferred)+len(endpoints.Alternative)) + for _, preferred := range endpoints.Preferred { + allHosts = append(allHosts, makeUrlFromHost(preferred, useHttps)) + } + for _, alternative := range endpoints.Alternative { + allHosts = append(allHosts, makeUrlFromHost(alternative, useHttps)) + } + return allHosts +} + +func (endpoints Endpoints) ToHostProvider() hostprovider.HostProvider { + return &endpointsHostProvider{ + iter: endpoints.Iter(), + freezer: freezer.New(), + } +} + +func (endpoints Endpoints) Clone() Endpoints { + return Endpoints{ + Preferred: append([]string{}, endpoints.Preferred...), + Alternative: append([]string{}, endpoints.Alternative...), + } +} + +func makeUrlFromHost(host string, useHttps bool) string { + if strings.HasPrefix(host, "http://") || strings.HasPrefix(host, "https://") { + return host + } + if useHttps { + return "https://" + host + } else { + return "http://" + host + } +} + +func (iter *EndpointsIter) Next(nextHost *string) bool { + if iter.isAlternative { + if iter.index >= len(iter.endpoints.Alternative) { + 
return false + } + host := iter.endpoints.Alternative[iter.index] + iter.index += 1 + *nextHost = host + return true + } + if iter.index >= len(iter.endpoints.Preferred) { + iter.isAlternative = true + iter.index = 0 + return iter.Next(nextHost) + } + host := iter.endpoints.Preferred[iter.index] + iter.index += 1 + *nextHost = host + return true +} + +func (iter *EndpointsIter) More() bool { + if iter.isAlternative { + return iter.index < len(iter.endpoints.Alternative) + } else if iter.index >= len(iter.endpoints.Preferred) { + return len(iter.endpoints.Alternative) > 0 + } + return true +} + +func (iter *EndpointsIter) SwitchToAlternative() { + if len(iter.endpoints.Alternative) > 0 && !iter.isAlternative { + iter.isAlternative = true + iter.index = 0 + } +} + +func (provider *endpointsHostProvider) Freeze(host string, cause error, duration time.Duration) error { + if duration <= 0 { + return nil + } + + provider.lastFreezeErr = cause + return provider.freezer.Freeze(host, duration) +} + +func (provider *endpointsHostProvider) Provider() (string, error) { + if provider.iter.endpoints.IsEmpty() { + return "", hostprovider.ErrNoHostFound + } + + var host string + for provider.iter.Next(&host) { + if provider.freezer.Available(host) { + return host, nil + } + } + + if provider.lastFreezeErr != nil { + return "", provider.lastFreezeErr + } else { + return "", hostprovider.ErrAllHostsFrozen + } +} diff --git a/storagev2/region/region_test.go b/storagev2/region/region_test.go new file mode 100644 index 00000000..f3db90fd --- /dev/null +++ b/storagev2/region/region_test.go @@ -0,0 +1,85 @@ +//go:build unit +// +build unit + +package region + +import ( + "testing" +) + +func TestRegion(t *testing.T) { + region := GetRegionByID("z0", true) + iter, err := region.EndpointsIter([]ServiceName{ServiceUp}) + if err != nil { + t.Fatal(err) + } + var domain string + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != 
"https://upload.qiniup.com" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://upload-z0.qiniup.com" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://up.qiniup.com" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://up-z0.qiniup.com" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://up.qbox.me" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://up-z0.qbox.me" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); ok { + t.Fatalf("should not get next domain") + } + + iter, err = region.EndpointsIter([]ServiceName{ServiceUp}) + if err != nil { + t.Fatal(err) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://upload.qiniup.com" { + t.Fatalf("unexpected domain: %s", domain) + } + + iter.SwitchToAlternative() + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://up.qbox.me" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); !ok { + t.Fatalf("should get next domain") + } else if domain != "https://up-z0.qbox.me" { + t.Fatalf("unexpected domain: %s", domain) + } + + if ok := iter.Next(&domain); ok { + t.Fatalf("should not get next domain") + } +} diff --git a/storagev2/uptoken/putpolicy.go b/storagev2/uptoken/putpolicy.go new file mode 100644 index 00000000..4734b538 --- /dev/null +++ b/storagev2/uptoken/putpolicy.go @@ -0,0 +1,407 @@ +package uptoken + +import ( + 
"encoding/json" + "errors" + "fmt" + "strings" + "time" +) + +// PutPolicy 存储上传策略 +type PutPolicy map[string]interface{} + +const ( + putPolicyKeyScope = "scope" + putPolicyKeyDeadline = "deadline" + putPolicyKeyIsPrefixalScope = "isPrefixalScope" + putPolicyKeyInsertOnly = "insertOnly" + putPolicyKeyEndUser = "endUser" + putPolicyKeyReturnUrl = "returnUrl" + putPolicyKeyReturnBody = "returnBody" + putPolicyKeyCallbackUrl = "callbackUrl" + putPolicyKeyCallbackHost = "callbackHost" + putPolicyKeyCallbackBody = "callbackBody" + putPolicyKeyCallbackBodyType = "callbackBodyType" + putPolicyKeyPersistentOps = "persistentOps" + putPolicyKeyPersistentNotifyUrl = "persistentNotifyUrl" + putPolicyKeyPersistentPipeline = "persistentPipeline" + putPolicyKeyForceSaveKey = "forceSaveKey" + putPolicyKeySaveKey = "saveKey" + putPolicyKeyFsizeMin = "fsizeMin" + putPolicyKeyFsizeLimit = "fsizeLimit" + putPolicyKeyDetectMime = "detectMime" + putPolicyKeyMimeLimit = "mimeLimit" + putPolicyKeyFileType = "fileType" +) + +var ( + // ErrEmptyBucketName 空的 Bucket 名称 + ErrEmptyBucketName = errors.New("empty bucket name") + + // ErrInvalidPolicyValue 非法凭证值 + ErrInvalidPolicyValue = errors.New("invalid put policy value") +) + +// FieldError indicates an error condition occurred while setting put policy +type FieldError struct { + Err error +} + +func (e *FieldError) Error() string { + return fmt.Sprintf("failed to set put policy: %v", e.Err) +} + +// Unwrap returns the underlying error cause +func (e *FieldError) Unwrap() error { + return e.Err +} + +// NewPutPolicy 为指定的空间生成上传策略 +func NewPutPolicy(bucket string, expiry time.Time) (PutPolicy, error) { + return NewPutPolicyWithKey(bucket, "", expiry) +} + +// NewPutPolicyWithKey 为指定的空间和对象名称生成上传策略 +func NewPutPolicyWithKey(bucket, key string, expiry time.Time) (PutPolicy, error) { + if bucket == "" { + return nil, &FieldError{Err: ErrEmptyBucketName} + } + scope := bucket + if key != "" { + scope += ":" + key + } + return 
make(PutPolicy).SetScope(scope).SetDeadline(expiry.Unix()), nil +} + +// NewPutPolicyWithKeyPrefix 为指定的空间和对象前缀生成上传策略 +func NewPutPolicyWithKeyPrefix(bucket, keyPrefix string, expiry time.Time) (PutPolicy, error) { + putPolicy, err := NewPutPolicyWithKey(bucket, keyPrefix, expiry) + if err != nil { + return nil, err + } + return putPolicy.SetIsPrefixalScope(1), nil +} + +// GetScope 获取指定的上传的目标资源空间 Bucket 和资源键 Key +func (putPolicy PutPolicy) GetScope() (string, bool) { + return putPolicy.getString(putPolicyKeyScope) +} + +// SetScope 指定上传的目标资源空间 Bucket 和资源键 Key +func (putPolicy PutPolicy) SetScope(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyScope, value) + return putPolicy +} + +// GetDeadline 获取上传策略有效截止时间 +func (putPolicy PutPolicy) GetDeadline() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyDeadline) +} + +// SetDeadline 指定上传策略有效截止时间 +func (putPolicy PutPolicy) SetDeadline(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyDeadline, value) + return putPolicy +} + +// GetIsPrefixalScope 获取是否允许用户上传以 scope 的 keyPrefix 为前缀的文件 +func (putPolicy PutPolicy) GetIsPrefixalScope() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyIsPrefixalScope) +} + +// SetIsPrefixalScope 指定是否允许用户上传以 scope 的 keyPrefix 为前缀的文件 +func (putPolicy PutPolicy) SetIsPrefixalScope(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyIsPrefixalScope, value) + return putPolicy +} + +// GetInsertOnly 获取是否限定为新增语意 +func (putPolicy PutPolicy) GetInsertOnly() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyInsertOnly) +} + +// SetInsertOnly 指定是否限定为新增语意 +func (putPolicy PutPolicy) SetInsertOnly(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyInsertOnly, value) + return putPolicy +} + +// GetEndUser 获取唯一属主标识 +func (putPolicy PutPolicy) GetEndUser() (string, bool) { + return putPolicy.getString(putPolicyKeyEndUser) +} + +// SetEndUser 指定唯一属主标识 +func (putPolicy PutPolicy) SetEndUser(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyEndUser, value) 
+ return putPolicy +} + +// GetReturnUrl 获取 Web 端文件上传成功后,浏览器执行 303 跳转的 URL +func (putPolicy PutPolicy) GetReturnUrl() (string, bool) { + return putPolicy.getString(putPolicyKeyReturnUrl) +} + +// SetReturnUrl 指定 Web 端文件上传成功后,浏览器执行 303 跳转的 URL +func (putPolicy PutPolicy) SetReturnUrl(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyReturnUrl, value) + return putPolicy +} + +// GetReturnBody 获取上传成功后,自定义七牛云最终返回给上传端的数据 +func (putPolicy PutPolicy) GetReturnBody() (string, bool) { + return putPolicy.getString(putPolicyKeyReturnBody) +} + +// SetReturnBody 指定上传成功后,自定义七牛云最终返回给上传端的数据 +func (putPolicy PutPolicy) SetReturnBody(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyReturnBody, value) + return putPolicy +} + +// GetCallbackUrl 获取上传成功后,七牛云向业务服务器发送 POST 请求的 URL +func (putPolicy PutPolicy) GetCallbackUrl() (string, bool) { + return putPolicy.getString(putPolicyKeyCallbackUrl) +} + +// SetCallbackUrl 指定上传成功后,七牛云向业务服务器发送 POST 请求的 URL +func (putPolicy PutPolicy) SetCallbackUrl(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyCallbackUrl, value) + return putPolicy +} + +// GetCallbackHost 获取上传成功后,七牛云向业务服务器发送回调通知时的 Host 值 +func (putPolicy PutPolicy) GetCallbackHost() (string, bool) { + return putPolicy.getString(putPolicyKeyCallbackHost) +} + +// SetCallbackHost 指定上传成功后,七牛云向业务服务器发送回调通知时的 Host 值 +func (putPolicy PutPolicy) SetCallbackHost(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyCallbackHost, value) + return putPolicy +} + +// GetCallbackBody 获取上传成功后,七牛云向业务服务器发送 Content-Type: application/x-www-form-urlencoded 的 POST 请求 +func (putPolicy PutPolicy) GetCallbackBody() (string, bool) { + return putPolicy.getString(putPolicyKeyCallbackBody) +} + +// SetCallbackBody 指定上传成功后,七牛云向业务服务器发送 Content-Type: application/x-www-form-urlencoded 的 POST 请求 +func (putPolicy PutPolicy) SetCallbackBody(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyCallbackBody, value) + return putPolicy +} + +// GetCallbackBodyType 获取上传成功后,七牛云向业务服务器发送回调通知 
callbackBody 的 Content-Type +func (putPolicy PutPolicy) GetCallbackBodyType() (string, bool) { + return putPolicy.getString(putPolicyKeyCallbackBodyType) +} + +// SetCallbackBodyType 指定上传成功后,七牛云向业务服务器发送回调通知 callbackBody 的 Content-Type +func (putPolicy PutPolicy) SetCallbackBodyType(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyCallbackBodyType, value) + return putPolicy +} + +// GetPersistentOps 获取资源上传成功后触发执行的预转持久化处理指令列表 +func (putPolicy PutPolicy) GetPersistentOps() (string, bool) { + return putPolicy.getString(putPolicyKeyPersistentOps) +} + +// SetPersistentOps 指定资源上传成功后触发执行的预转持久化处理指令列表 +func (putPolicy PutPolicy) SetPersistentOps(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyPersistentOps, value) + return putPolicy +} + +// GetPersistentNotifyUrl 获取接收持久化处理结果通知的 URL +func (putPolicy PutPolicy) GetPersistentNotifyUrl() (string, bool) { + return putPolicy.getString(putPolicyKeyPersistentNotifyUrl) +} + +// SetPersistentNotifyUrl 指定接收持久化处理结果通知的 URL +func (putPolicy PutPolicy) SetPersistentNotifyUrl(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyPersistentNotifyUrl, value) + return putPolicy +} + +// GetPersistentPipeline 获取转码队列名 +func (putPolicy PutPolicy) GetPersistentPipeline() (string, bool) { + return putPolicy.getString(putPolicyKeyPersistentPipeline) +} + +// SetPersistentPipeline 指定转码队列名 +func (putPolicy PutPolicy) SetPersistentPipeline(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyPersistentPipeline, value) + return putPolicy +} + +// GetForceSaveKey 获取 saveKey 的优先级设置 +func (putPolicy PutPolicy) GetForceSaveKey() (bool, bool) { + return putPolicy.getBool(putPolicyKeyForceSaveKey) +} + +// SetForceSaveKey 指定 saveKey 的优先级设置 +func (putPolicy PutPolicy) SetForceSaveKey(value bool) PutPolicy { + _ = putPolicy.Set(putPolicyKeyForceSaveKey, value) + return putPolicy +} + +// GetSaveKey 获取自定义资源名 +func (putPolicy PutPolicy) GetSaveKey() (string, bool) { + return putPolicy.getString(putPolicyKeySaveKey) +} + +// 
SetSaveKey 指定自定义资源名 +func (putPolicy PutPolicy) SetSaveKey(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeySaveKey, value) + return putPolicy +} + +// GetFsizeMin 获取限定上传文件大小最小值 +func (putPolicy PutPolicy) GetFsizeMin() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyFsizeMin) +} + +// SetFsizeMin 指定限定上传文件大小最小值 +func (putPolicy PutPolicy) SetFsizeMin(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyFsizeMin, value) + return putPolicy +} + +// GetFsizeLimit 获取限定上传文件大小最大值 +func (putPolicy PutPolicy) GetFsizeLimit() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyFsizeLimit) +} + +// SetFsizeLimit 指定限定上传文件大小最大值 +func (putPolicy PutPolicy) SetFsizeLimit(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyFsizeLimit, value) + return putPolicy +} + +// GetDetectMime 获取开启 MimeType 侦测功能 +func (putPolicy PutPolicy) GetDetectMime() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyDetectMime) +} + +// SetDetectMime 指定开启 MimeType 侦测功能 +func (putPolicy PutPolicy) SetDetectMime(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyDetectMime, value) + return putPolicy +} + +// GetMimeLimit 获取限定用户上传的文件类型 +func (putPolicy PutPolicy) GetMimeLimit() (string, bool) { + return putPolicy.getString(putPolicyKeyMimeLimit) +} + +// SetMimeLimit 指定限定用户上传的文件类型 +func (putPolicy PutPolicy) SetMimeLimit(value string) PutPolicy { + _ = putPolicy.Set(putPolicyKeyMimeLimit, value) + return putPolicy +} + +// GetFileType 获取文件存储类型 +func (putPolicy PutPolicy) GetFileType() (int64, bool) { + return putPolicy.getInt64(putPolicyKeyFileType) +} + +// SetFileType 指定文件存储类型 +func (putPolicy PutPolicy) SetFileType(value int64) PutPolicy { + _ = putPolicy.Set(putPolicyKeyFileType, value) + return putPolicy +} + +// Get 获取上传策略的值 +func (putPolicy PutPolicy) Get(key string) (value interface{}, ok bool) { + value, ok = putPolicy[key] + return +} + +func (putPolicy PutPolicy) getString(key string) (string, bool) { + v, ok := putPolicy[key] + if !ok { + return "", 
false + } + s, ok := v.(string) + return s, ok +} + +func (putPolicy PutPolicy) getBool(key string) (bool, bool) { + v, ok := putPolicy[key] + if !ok { + return false, false + } + b, ok := v.(bool) + return b, ok +} + +func (putPolicy PutPolicy) getInt64(key string) (int64, bool) { + if v, ok := putPolicy[key]; ok { + switch i := v.(type) { + case int64: + return i, true + case float64: + return int64(i), true + case float32: + return int64(i), true + case int32: + return int64(i), true + case int16: + return int64(i), true + case int8: + return int64(i), true + case int: + return int64(i), true + case uint64: + return int64(i), true + case uint32: + return int64(i), true + case uint16: + return int64(i), true + case uint8: + return int64(i), true + case uint: + return int64(i), true + case json.Number: + i64, err := i.Int64() + return i64, err == nil + } + } + return 0, false +} + +// Set 设置上传策略的值 +// +// 如果 value 为 nil,则返回 ErrInvalidPolicyValue 错误。 +func (putPolicy PutPolicy) Set(key string, value interface{}) error { + if value == nil { + return &FieldError{Err: ErrInvalidPolicyValue} + } + putPolicy[key] = value + return nil +} + +// Delete 删除上传策略的值 +func (putPolicy PutPolicy) Delete(key string) (value interface{}, ok bool) { + value, ok = putPolicy[key] + delete(putPolicy, key) + return +} + +// GetBucketName 获取上传策略内的空间名称 +// +// 该方法会从上传策略中解析出空间名称,如果上传策略中没有 scope 字段,则返回 ErrEmptyBucketName 错误,如果上传策略的 scope 字段格式有误,则返回 ErrInvalidPolicyValue 错误。 +func (putPolicy PutPolicy) GetBucketName() (string, error) { + if scope, ok := putPolicy.GetScope(); !ok { + return "", ErrEmptyBucketName + } else { + fields := strings.SplitN(scope, ":", 2) + if fields[0] == "" { + return "", ErrInvalidPolicyValue + } + return fields[0], nil + } +} diff --git a/storagev2/uptoken/uploadtoken.go b/storagev2/uptoken/uploadtoken.go new file mode 100644 index 00000000..78d9302d --- /dev/null +++ b/storagev2/uptoken/uploadtoken.go @@ -0,0 +1,150 @@ +package uptoken + +import ( + "context" + 
"encoding/base64" + "encoding/json" + "errors" + "strings" + "sync" + + "github.com/qiniu/go-sdk/v7/storagev2/credentials" +) + +// ErrInvalidUpToken 非法的上传凭证 +var ErrInvalidUpToken = errors.New("invalid upToken") + +type ( + // PutPolicyProvider 获取上传策略接口 + PutPolicyProvider interface { + GetPutPolicy(context.Context) (PutPolicy, error) + } + // AccessKeyProvider 获取 AccessKey 接口 + AccessKeyProvider interface { + GetAccessKey(context.Context) (string, error) + } + // UpTokenProvider 获取上传凭证接口 + UpTokenProvider interface { + GetUpToken(context.Context) (string, error) + } + // Provider 获取上传凭证,AccessKey 和上传策略接口 + Provider interface { + PutPolicyProvider + AccessKeyProvider + UpTokenProvider + } + signer struct { + putPolicy PutPolicy + credentialsProvider credentials.CredentialsProvider + onceCredentials sync.Once + upToken string + credentials *credentials.Credentials + } + parser struct { + upToken string + putPolicy PutPolicy + accessKey string + splits []string + } +) + +// NewSigner 创建上传凭证签发器 +// +// 需要注意的是 NewSigner 仅仅只会通过 credentials.CredentialsProvider 获取一次鉴权参数,之后就会缓存该鉴权参数,不会反复获取 +func NewSigner(putPolicy PutPolicy, credentialsProvider credentials.CredentialsProvider) Provider { + return &signer{putPolicy: putPolicy, credentialsProvider: credentialsProvider} +} + +func (signer *signer) GetPutPolicy(context.Context) (PutPolicy, error) { + return signer.putPolicy, nil +} + +func (signer *signer) GetAccessKey(ctx context.Context) (string, error) { + var err error + credentials, err := signer.onceGetCredentials(ctx) + if err != nil { + return "", err + } + return credentials.AccessKey, nil +} + +func (signer *signer) GetUpToken(ctx context.Context) (string, error) { + return signer.onceGetUpToken(ctx) +} + +func (signer *signer) onceGetCredentials(ctx context.Context) (*credentials.Credentials, error) { + var err error + signer.onceCredentials.Do(func() { + signer.credentials, err = signer.credentialsProvider.Get(ctx) + }) + return signer.credentials, err +} + +func 
(signer *signer) onceGetUpToken(ctx context.Context) (string, error) { + var err error + if signer.upToken != "" { + return signer.upToken, nil + } + credentials, err := signer.onceGetCredentials(ctx) + if err != nil { + return "", err + } + putPolicyJson, err := json.Marshal(signer.putPolicy) + if err != nil { + return "", err + } + signer.upToken = credentials.SignWithData(putPolicyJson) + return signer.upToken, nil +} + +// NewParser 创建上传凭证解析器 +func NewParser(upToken string) Provider { + return &parser{upToken: upToken} +} + +func (parser *parser) GetPutPolicy(context.Context) (PutPolicy, error) { + if parser.putPolicy != nil { + return parser.putPolicy, nil + } + splits, ok := parser.onceGetSplits() + if !ok { + return nil, ErrInvalidUpToken + } + putPolicyJson, err := base64.URLEncoding.DecodeString(splits[2]) + if err != nil { + return nil, ErrInvalidUpToken + } + err = json.Unmarshal(putPolicyJson, &parser.putPolicy) + return parser.putPolicy, err +} + +func (parser *parser) GetAccessKey(context.Context) (string, error) { + if parser.accessKey != "" { + return parser.accessKey, nil + } + splits, ok := parser.onceGetSplits() + if !ok { + return "", ErrInvalidUpToken + } + parser.accessKey = splits[0] + return parser.accessKey, nil +} + +func (parser *parser) onceGetSplits() ([]string, bool) { + if len(parser.splits) > 0 { + return parser.splits, true + } + splits := strings.Split(parser.upToken, ":") + if len(splits) == 5 && splits[0] == "" { + splits = splits[2:] + } + if len(splits) != 3 { + return nil, false + } + parser.splits = splits + return parser.splits, true +} + +func (parser *parser) GetUpToken(context.Context) (string, error) { + return parser.upToken, nil +} diff --git a/storagev2/uptoken/uploadtoken_test.go b/storagev2/uptoken/uploadtoken_test.go new file mode 100644 index 00000000..3560ebb2 --- /dev/null +++ b/storagev2/uptoken/uploadtoken_test.go @@ -0,0 +1,52 @@ +//go:build unit +// +build unit + +package uptoken_test + +import ( + "context" 
+ "testing" + "time" + + "github.com/qiniu/go-sdk/v7/storagev2/credentials" + "github.com/qiniu/go-sdk/v7/storagev2/uptoken" +) + +func TestSignPutPolicy(t *testing.T) { + const expectedBucketName = "testbucket" + const expectedAccessKey = "testaccesskey" + const expectedSecretKey = "testsecretkey" + const expectedExpires = int64(1675937798) + + putPolicy, err := uptoken.NewPutPolicy(expectedBucketName, time.Unix(expectedExpires, 0)) + if err != nil { + t.Fatalf("failed to create put policy: %s", err) + } + + if scope, _ := putPolicy.GetScope(); scope != expectedBucketName { + t.Fatalf("unexpected bucket name: %s", scope) + } + + if actualDeadline, _ := putPolicy.GetDeadline(); actualDeadline != expectedExpires { + t.Fatalf("unexpected deadline: %d", actualDeadline) + } + + signer := uptoken.NewSigner(putPolicy, credentials.NewCredentials(expectedAccessKey, expectedSecretKey)) + upToken, err := signer.GetUpToken(context.Background()) + if err != nil { + t.Fatalf("failed to retrieve uptoken: %s", err) + } + + parser := uptoken.NewParser(upToken) + if actualAccessKey, err := parser.GetAccessKey(context.Background()); err != nil { + t.Fatalf("failed to retrieve accessKey: %s", err) + } else if actualAccessKey != expectedAccessKey { + t.Fatalf("unexpected accessKey: %s", actualAccessKey) + } + + if actualPutPolicy, err := parser.GetPutPolicy(context.Background()); err != nil { + t.Fatalf("failed to retrieve putPolicy: %s", err) + } else if actualScope, _ := actualPutPolicy.GetScope(); actualScope != expectedBucketName { + t.Fatalf("unexpected scope: %s", actualScope) + } +} diff --git a/types.go b/types.go index e46b7683..f8dd17bf 100644 --- a/types.go +++ b/types.go @@ -1,22 +1,24 @@ package api import ( + "bytes" "io" - "io/ioutil" "net/http" + + internal_io "github.com/qiniu/go-sdk/v7/internal/io" ) // BytesFromRequest 读取 http.Request.Body 的内容到 slice 中 -func BytesFromRequest(r *http.Request) (b []byte, err error) { - if r.ContentLength == 0 { - return 
+func BytesFromRequest(r *http.Request) ([]byte, error) { + if bytesNopCloser, ok := r.Body.(*internal_io.BytesNopCloser); ok { + return bytesNopCloser.Bytes(), nil } - if r.ContentLength > 0 { - b = make([]byte, int(r.ContentLength)) - _, err = io.ReadFull(r.Body, b) - return + buf := bytes.NewBuffer(make([]byte, 0, int(r.ContentLength)+1024)) + _, err := io.Copy(buf, r.Body) + if err != nil { + return nil, err } - return ioutil.ReadAll(r.Body) + return buf.Bytes(), nil } // SeekerLen 通过 io.Seeker 获取数据大小