diff --git a/cmd/ore/aws/upload.go b/cmd/ore/aws/upload.go index f7474b819..3f3acc92a 100644 --- a/cmd/ore/aws/upload.go +++ b/cmd/ore/aws/upload.go @@ -221,7 +221,7 @@ func runUpload(cmd *cobra.Command, args []string) error { } defer f.Close() - err = API.UploadObject(f, s3BucketName, s3ObjectPath, uploadForce) + err = API.UploadObject(f, s3BucketName, s3ObjectPath, uploadForce, "") if err != nil { fmt.Fprintf(os.Stderr, "Error uploading: %v\n", err) os.Exit(1) diff --git a/cmd/plume/fcos.go b/cmd/plume/fcos.go new file mode 100644 index 000000000..379887a0d --- /dev/null +++ b/cmd/plume/fcos.go @@ -0,0 +1,51 @@ +// Copyright 2019 Red Hat Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package main + +import ( + "github.com/spf13/pflag" +) + +var ( + specPolicy string + specCommitId string + + fcosSpec = fcosChannelSpec{ + Bucket: "fcos-builds", + Profile: "default", + Region: "us-east-1", + } +) + +func AddFcosSpecFlags(flags *pflag.FlagSet) { + flags.StringVar(&specPolicy, "policy", "public-read", "Canned ACL policy") + flags.StringVar(&specCommitId, "commit-id", "", "OSTree Commit ID") +} + +func FcosValidateArguments() { + if specVersion == "" { + plog.Fatal("--version is required") + } + if specChannel == "" { + plog.Fatal("--channel is required") + } + if specCommitId == "" { + plog.Fatal("--commit-id is required") + } +} + +func FcosChannelSpec() fcosChannelSpec { + return fcosSpec +} diff --git a/cmd/plume/prerelease.go b/cmd/plume/prerelease.go index a2241316a..0e9f133d8 100644 --- a/cmd/plume/prerelease.go +++ b/cmd/plume/prerelease.go @@ -525,7 +525,7 @@ func awsUploadToPartition(spec *channelSpec, part *awsPartitionSpec, imageName, if snapshot == nil { plog.Printf("Creating S3 object %v...", s3ObjectURL) - err = api.UploadObject(f, part.Bucket, s3ObjectPath, false) + err = api.UploadObject(f, part.Bucket, s3ObjectPath, false, "") if err != nil { return nil, nil, fmt.Errorf("Error uploading: %v", err) } diff --git a/cmd/plume/release.go b/cmd/plume/release.go index 315810185..845d45101 100644 --- a/cmd/plume/release.go +++ b/cmd/plume/release.go @@ -15,8 +15,13 @@ package main import ( + "bytes" + "encoding/json" "fmt" + "io/ioutil" "net/http" + "net/url" + "path/filepath" "sort" "strings" "time" @@ -52,6 +57,7 @@ func init() { "perform a trial run, do not make changes") AddSpecFlags(cmdRelease.Flags()) AddFedoraSpecFlags(cmdRelease.Flags()) + AddFcosSpecFlags(cmdRelease.Flags()) root.AddCommand(cmdRelease) } @@ -61,11 +67,30 @@ func runRelease(cmd *cobra.Command, args []string) { if err := runCLRelease(cmd, args); err != nil { plog.Fatal(err) } + case "fcos": + if err := runFcosRelease(cmd, args); err != nil { + plog.Fatal(err) + } 
default: plog.Fatalf("Unknown distro %q:", selectedDistro) } } +func runFcosRelease(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + plog.Fatal("No args accepted") + } + + spec := FcosChannelSpec() + FcosValidateArguments() + + doS3(&spec) + + modifyReleaseMetadataIndex(&spec, specCommitId) + + return nil +} + func runFedoraRelease(cmd *cobra.Command, args []string) error { if len(args) > 0 { plog.Fatal("No args accepted") @@ -445,3 +470,88 @@ func doAWS(ctx context.Context, client *http.Client, src *storage.Bucket, spec * } } } + +func doS3(spec *fcosChannelSpec) { + api, err := aws.New(&aws.Options{ + CredentialsFile: awsCredentialsFile, + Profile: spec.Profile, + Region: spec.Region, + }) + if err != nil { + plog.Fatalf("creating aws client: %v", err) + } + + // Assumes the bucket layout defined inside of + // https://github.com/coreos/fedora-coreos-tracker/issues/189 + err = api.UpdateBucketObjectsACL(spec.Bucket, filepath.Join("prod", "streams", specChannel, "builds", specVersion), specPolicy) + if err != nil { + plog.Fatalf("updating object ACLs: %v", err) + } +} + +func modifyReleaseMetadataIndex(spec *fcosChannelSpec, commitId string) { + api, err := aws.New(&aws.Options{ + CredentialsFile: awsCredentialsFile, + Profile: spec.Profile, + Region: spec.Region, + }) + if err != nil { + plog.Fatalf("creating aws client: %v", err) + } + + path := filepath.Join("prod", "streams", specChannel, "releases.json") + + f, err := api.DownloadFile(spec.Bucket, path) + if err != nil { + plog.Fatalf("downloading release metadata index: %v", err) + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + plog.Fatalf("reading release metadata index: %v", err) + } + + var m ReleaseMetadata + err = json.Unmarshal(data, &m) + if err != nil { + plog.Fatalf("unmarshaling release metadata json: %v", err) + } + + url, err := url.Parse(fmt.Sprintf("https://%s.s3.amazonaws.com/prod/streams/%s/builds/%s/release.json", spec.Bucket, specChannel, 
specVersion)) + if err != nil { + plog.Fatalf("creating metadata url: %v", err) + } + + newRel := BuildMetadata{ + CommitHash: specCommitId, + Version: specVersion, + Endpoint: url.String(), + } + + for i, rel := range m.Releases { + if rel == newRel { + if i != (len(m.Releases) - 1) { + plog.Fatalf("build is already present and is not the latest release") + } + + // the build is already the latest release, exit + return + } + } + + m.Releases = append(m.Releases, newRel) + + m.Metadata.LastModified = time.Now().UTC().Format("2006-01-02T15:04:05Z") + m.Note = "not for general usage" + + out, err := json.Marshal(m) + if err != nil { + plog.Fatalf("marshalling release metadata json: %v", err) + } + + err = api.UploadObject(bytes.NewReader(out), spec.Bucket, path, true, specPolicy) + if err != nil { + plog.Fatalf("uploading release metadata json: %v", err) + } +} diff --git a/cmd/plume/types.go b/cmd/plume/types.go index 9c1f7db64..54ac4f3aa 100644 --- a/cmd/plume/types.go +++ b/cmd/plume/types.go @@ -78,3 +78,25 @@ type channelSpec struct { Azure azureSpec AWS awsSpec } + +type fcosChannelSpec struct { + Bucket string + Profile string + Region string +} + +type ReleaseMetadata struct { + Note string `json:"note"` // used to note to users not to consume the release metadata index + Releases []BuildMetadata `json:"releases"` + Metadata Metadata `json:"metadata"` +} + +type BuildMetadata struct { + CommitHash string `json:"commit"` + Version string `json:"version"` + Endpoint string `json:"endpoint"` +} + +type Metadata struct { + LastModified string `json:"last-modified"` +} diff --git a/platform/api/aws/s3.go b/platform/api/aws/s3.go index 4b1c82222..12e288cda 100644 --- a/platform/api/aws/s3.go +++ b/platform/api/aws/s3.go @@ -17,6 +17,9 @@ package aws import ( "fmt" "io" + "io/ioutil" + "net/url" + "os" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -43,7 +46,7 @@ func s3IsNotFound(err error) bool { } // UploadObject uploads an object to 
S3
-func (a *API) UploadObject(r io.Reader, bucket, path string, force bool) error {
+func (a *API) UploadObject(r io.Reader, bucket, path string, force bool, policy string) error {
 	s3uploader := s3manager.NewUploaderWithClient(a.s3)
 
 	if !force {
@@ -66,6 +69,8 @@ func (a *API) UploadObject(r io.Reader, bucket, path string, force bool) error {
 		Body:   r,
 		Bucket: aws.String(bucket),
 		Key:    aws.String(path),
+		// NOTE(review): some callers pass policy "" — confirm the SDK omits an empty ACL header.
+		ACL:    aws.String(policy),
 	})
 	if err != nil {
 		return fmt.Errorf("error uploading s3://%v/%v: %v", bucket, path, err)
@@ -98,3 +102,107 @@ func (a *API) InitializeBucket(bucket string) error {
 	}
 	return err
 }
+
+// PutObjectAcl sets the ACL on an existing object to one of the canned ACL policies.
+func (a *API) PutObjectAcl(bucket, path, policy string) error {
+	_, err := a.s3.PutObjectAcl(&s3.PutObjectAclInput{
+		ACL:    aws.String(policy),
+		Bucket: aws.String(bucket),
+		Key:    aws.String(path),
+	})
+	if err != nil {
+		return fmt.Errorf("setting object ACL: %v", err)
+	}
+	return nil
+}
+
+// CopyObject copies an object to a new location with the given canned ACL policy.
+func (a *API) CopyObject(srcBucket, srcPath, destBucket, destPath, policy string) error {
+	err := a.InitializeBucket(destBucket)
+	if err != nil {
+		return fmt.Errorf("creating destination bucket: %v", err)
+	}
+	_, err = a.s3.CopyObject(&s3.CopyObjectInput{
+		ACL:        aws.String(policy),
+		CopySource: aws.String(url.QueryEscape(fmt.Sprintf("%s/%s", srcBucket, srcPath))),
+		Bucket:     aws.String(destBucket),
+		Key:        aws.String(destPath),
+	})
+	if err != nil {
+		// an already-exists error from the copy is treated as success
+		if aerr, ok := err.(awserr.Error); ok {
+			if aerr.Code() == alreadyExistsErr {
+				return nil
+			}
+		}
+	}
+	return err
+}
+
+// CopyBucket copies all objects under prefix in srcBucket to destBucket with the given canned ACL policy.
+func (a *API) CopyBucket(srcBucket, prefix, destBucket, policy string) error {
+	// TODO(review): ListObjects returns at most 1000 keys; paginate for larger prefixes.
+	objects, err := a.s3.ListObjects(&s3.ListObjectsInput{
+		Bucket: aws.String(srcBucket),
+		Prefix: aws.String(prefix),
+	})
+	if err != nil {
+		return fmt.Errorf("listing bucket: %v", err)
+	}
+
+	err = a.InitializeBucket(destBucket)
+	if err != nil {
+		return fmt.Errorf("creating destination bucket: %v", err)
+	}
+
+	for _, object := range objects.Contents {
+		path := *object.Key
+		err = a.CopyObject(srcBucket, path, destBucket, path, policy)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// TODO: bikeshed this name
+// UpdateBucketObjectsACL applies the given canned ACL policy to every object under prefix in srcBucket.
+func (a *API) UpdateBucketObjectsACL(srcBucket, prefix, policy string) error {
+	// TODO(review): ListObjects returns at most 1000 keys; paginate for larger prefixes.
+	objects, err := a.s3.ListObjects(&s3.ListObjectsInput{
+		Bucket: aws.String(srcBucket),
+		Prefix: aws.String(prefix),
+	})
+	if err != nil {
+		return fmt.Errorf("listing bucket: %v", err)
+	}
+
+	for _, object := range objects.Contents {
+		path := *object.Key
+		err = a.PutObjectAcl(srcBucket, path, policy)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// DownloadFile fetches an object from S3 into a temporary file. The caller is
+// responsible for closing (and removing) the returned file.
+func (a *API) DownloadFile(srcBucket, srcPath string) (*os.File, error) {
+	f, err := ioutil.TempFile("", "mantle-file")
+	if err != nil {
+		return nil, err
+	}
+	downloader := s3manager.NewDownloader(a.session)
+	_, err = downloader.Download(f, &s3.GetObjectInput{
+		Bucket: aws.String(srcBucket),
+		Key:    aws.String(srcPath),
+	})
+	if err != nil {
+		f.Close()
+		// don't leak the temp file on failure
+		os.Remove(f.Name())
+		return nil, err
+	}
+	return f, nil
+}