Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Decomposedfs lookup outermost root #2373

Closed
wants to merge 32 commits into from
Closed
Show file tree
Hide file tree
Changes from 13 commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
427faf4
decomposedfs: convert node.spaceroot into a simple id string
butonic Dec 13, 2021
97ea476
add changelog
butonic Dec 13, 2021
40292ed
add comment
butonic Dec 13, 2021
9617499
remember share root when checking permissions
butonic Dec 13, 2021
011851c
shorten changelog title
butonic Dec 13, 2021
79ef8c0
fix changelog typos
butonic Dec 13, 2021
f689431
fix mimetype
butonic Dec 14, 2021
45116c1
fix comment
butonic Dec 14, 2021
a92acf9
lint
butonic Dec 14, 2021
743e7a3
wtf
butonic Dec 15, 2021
99da3c0
fix lint
butonic Dec 15, 2021
fb372cf
remove unused code
butonic Dec 15, 2021
928d165
this might actually work
butonic Dec 17, 2021
7e1cded
Merge branch 'edge' into decomposedfs-lookup-outermost-root
butonic Dec 17, 2021
7026d61
work
butonic Dec 17, 2021
342fd57
it works in ocis
butonic Dec 17, 2021
6b93948
return a space when id happens to match a share
butonic Dec 17, 2021
ea79bb6
works?
butonic Dec 17, 2021
bc276a9
final path fix
butonic Dec 20, 2021
3c72a16
fix cs3 ref parsing
butonic Dec 20, 2021
11af1ec
only look up grants for id based requests
butonic Dec 20, 2021
7cbbde5
fix stating of mounted child in share jail root
butonic Dec 20, 2021
d4a7762
add missing comment
butonic Dec 20, 2021
c53f191
inline if statement
butonic Dec 20, 2021
dc26784
use relative reference when looking up responsible provider for a share
butonic Dec 20, 2021
24366a0
use . in path to make relative requests when resolving shares
butonic Dec 20, 2021
d59b008
Merge branch 'edge' into decomposedfs-lookup-outermost-root
butonic Dec 20, 2021
f2bcc8c
use ctxpkg instead of revactx as package name
butonic Dec 20, 2021
319edf7
always prefix path with / in list share response
butonic Dec 20, 2021
b6ca718
decomposedfs: make lookup.Path take into account share roots
butonic Dec 20, 2021
c8fad3b
fix gateway handling relative path responses
butonic Dec 20, 2021
072424a
add test to cross storage move not supported section
butonic Dec 20, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions changelog/unreleased/decomposedfs-lookup-outermost-root.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
Enhancement: The decomposedfs now returns the outermost space root

When stating a resource by id the decomposedfs will now also return the outermost space root. This allows the gateway to properly reconstruct global paths by combining the space mountpoint and the relative path.
For the owner the root will be the space root. For share recipients the root will be the outermost shared resource. For now, the root is returned as an opaque property.

https://github.com/cs3org/reva/pull/2373
310 changes: 165 additions & 145 deletions internal/grpc/services/gateway/storageprovider.go

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ import (
rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/cs3org/reva/pkg/appctx"
"github.com/cs3org/reva/pkg/errtypes"
"github.com/cs3org/reva/pkg/rgrpc"
Expand Down Expand Up @@ -304,76 +305,80 @@ func (s *service) CreateStorageSpace(ctx context.Context, req *provider.CreateSt

func (s *service) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSpacesRequest) (*provider.ListStorageSpacesResponse, error) {

spaceTypes := []string{}
res := &provider.ListStorageSpacesResponse{
Status: status.NewOK(ctx),
}
spaceID := &provider.ResourceId{}
for _, f := range req.Filters {
switch f.Type {
case provider.ListStorageSpacesRequest_Filter_TYPE_SPACE_TYPE:
if f.GetSpaceType() != "share" {
return &provider.ListStorageSpacesResponse{
Status: &rpc.Status{Code: rpc.Code_CODE_OK},
}, nil
}
spaceTypes = append(spaceTypes, f.GetSpaceType())
case provider.ListStorageSpacesRequest_Filter_TYPE_ID:
spaceid, _ := utils.SplitStorageSpaceID(f.GetId().OpaqueId)
if spaceid != utils.ShareStorageProviderID {
return &provider.ListStorageSpacesResponse{
// a specific id was requested, return not found instead of empty list
Status: &rpc.Status{Code: rpc.Code_CODE_NOT_FOUND},
}, nil
spaceID.StorageId, spaceID.OpaqueId = utils.SplitStorageSpaceID(f.GetId().OpaqueId)
if spaceID.StorageId == "" || spaceID.OpaqueId == "" {
res.Status = status.NewInvalid(ctx, "invalid space id")
return res, nil
}
}
}

lsRes, err := s.sharesProviderClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{})
if err != nil {
return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
if len(spaceTypes) == 0 {
spaceTypes = []string{"mountpoint"}
}
if lsRes.Status.Code != rpc.Code_CODE_OK {
return nil, fmt.Errorf("sharesstorageprovider: error calling ListReceivedSharesRequest")
}

res := &provider.ListStorageSpacesResponse{}
for i := range lsRes.Shares {

if lsRes.Shares[i].MountPoint == nil {
// the gateway needs a name to use as the path segment in the dir listing
continue
}
space := &provider.StorageSpace{
Id: &provider.StorageSpaceId{
// Do we need a unique spaceid for every share?
// we are going to use the opaque id of the resource as the spaceid
OpaqueId: "a0ca6a90-a365-4782-871e-d44447bbc668!" + lsRes.Shares[i].Share.ResourceId.OpaqueId,
},
SpaceType: "share",
Owner: &userv1beta1.User{Id: lsRes.Shares[i].Share.Owner},
// return the actual resource id
//Root: lsRes.Shares[i].Share.ResourceId,
Root: &provider.ResourceId{
StorageId: utils.ShareStorageProviderID,
OpaqueId: lsRes.Shares[i].Share.ResourceId.OpaqueId,
},
// TODO in the future the spaces registry will handle the alias for share spaces.
// for now use the name
Name: lsRes.Shares[i].MountPoint.Path,
}

// TODO the gateway needs to stat if it needs the mtime
/*
info, st, err := s.statResource(ctx, lsRes.Shares[i].Share.ResourceId, "")
for i := range spaceTypes {
switch spaceTypes[i] {
case "mountpoint":
lsRes, err := s.sharesProviderClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{
// FIXME filter by received shares for resource id - listing all shares is too expensive!
})
if err != nil {
return nil, err
return nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
}
if st.Code != rpc.Code_CODE_OK {
continue
if lsRes.Status.Code != rpc.Code_CODE_OK {
return nil, fmt.Errorf("sharesstorageprovider: error calling ListReceivedSharesRequest")
}
space.Mtime = info.Mtime
*/

// what if we don't have a name?
res.StorageSpaces = append(res.StorageSpaces, space)
for i := range lsRes.Shares {
root := &provider.ResourceId{
StorageId: utils.ShareStorageProviderID,
OpaqueId: lsRes.Shares[i].Share.Id.OpaqueId,
}
// do we filter by id
if spaceID.StorageId != "" {
switch {
case utils.ResourceIDEqual(spaceID, root):
// we have a virtual node
case utils.ResourceIDEqual(spaceID, lsRes.Shares[i].Share.ResourceId):
// we have a mount point
//root = lsRes.Shares[i].Share.ResourceId
default:
// none of our business
continue
}
}
space := &provider.StorageSpace{
Id: &provider.StorageSpaceId{
OpaqueId: root.StorageId + "!" + root.OpaqueId,
},
SpaceType: "mountpoint",
Owner: &userv1beta1.User{Id: lsRes.Shares[i].Share.Owner}, // FIXME actually, the mount point belongs to the recipient
// the sharesstorageprovider keeps track of mount points
Root: root,
}

// TODO in the future the spaces registry will handle the alias for share spaces.
// for now use the name from the share to override the name determined by stat
if lsRes.Shares[i].MountPoint != nil {
space.Name = lsRes.Shares[i].MountPoint.Path
}

// what if we don't have a name?
res.StorageSpaces = append(res.StorageSpaces, space)
}
}
}
res.Status = status.NewOK(ctx)

return res, nil
}

Expand Down Expand Up @@ -477,7 +482,10 @@ func (s *service) Move(ctx context.Context, req *provider.MoveRequest) (*provide
len(strings.SplitN(req.Destination.Path, "/", 3)) == 2 {

// Change the MountPoint of the share, it has no relative prefix
srcReceivedShare.MountPoint = &provider.Reference{Path: filepath.Base(req.Destination.Path)}
srcReceivedShare.MountPoint = &provider.Reference{
// FIXME actually it does have a resource id: the one of the sharesstorageprovider
Path: filepath.Base(req.Destination.Path),
}

_, err = s.sharesProviderClient.UpdateReceivedShare(ctx, &collaboration.UpdateReceivedShareRequest{
Share: srcReceivedShare,
Expand Down Expand Up @@ -535,15 +543,44 @@ func (s *service) Stat(ctx context.Context, req *provider.StatRequest) (*provide
Status: rpcStatus,
}, nil
}
if receivedShare.State == collaboration.ShareState_SHARE_STATE_PENDING {
return &provider.StatResponse{
Status: &rpc.Status{Code: rpc.Code_CODE_NOT_FOUND},
// not mounted yet
}, nil
}

return s.gateway.Stat(ctx, &provider.StatRequest{
sRes, err := s.gateway.Stat(ctx, &provider.StatRequest{
Opaque: req.Opaque,
Ref: &provider.Reference{
ResourceId: receivedShare.Share.ResourceId,
Path: req.Ref.Path,
Path: req.Ref.Path, // TODO can path ever be anything else than ""?
},
ArbitraryMetadataKeys: req.ArbitraryMetadataKeys,
})

if err == nil && sRes.Status.Code == rpc.Code_CODE_OK {
if sRes.Info.Opaque == nil {
sRes.Info.Opaque = &typesv1beta1.Opaque{
Map: map[string]*typesv1beta1.OpaqueEntry{},
}
} else if sRes.Info.Opaque.Map == nil {
sRes.Info.Opaque.Map = map[string]*typesv1beta1.OpaqueEntry{}
}
// set root to the sharesstorageprovider
sRes.Info.Opaque.Map["root"] = &typesv1beta1.OpaqueEntry{
Decoder: "plain",
Value: []byte(utils.ShareStorageProviderID),
}
// overwrite id to make subsequent stat calls use the mount point
// of the sharesstorageprovider to build absolute paths
sRes.Info.Id = &provider.ResourceId{
StorageId: utils.ShareStorageProviderID,
OpaqueId: receivedShare.Share.Id.OpaqueId,
}
}

return sRes, err
}

func (s *service) ListContainerStream(req *provider.ListContainerStreamRequest, ss provider.ProviderAPI_ListContainerStreamServer) error {
Expand Down Expand Up @@ -682,7 +719,9 @@ func (s *service) resolveReference(ctx context.Context, ref *provider.Reference)
}
if utils.IsRelativeReference(ref) {
// look up share for this resourceid
lsRes, err := s.sharesProviderClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{})
lsRes, err := s.sharesProviderClient.ListReceivedShares(ctx, &collaboration.ListReceivedSharesRequest{
// FIXME filter by received shares for resource id - listing all shares is too expensive!
})
if err != nil {
return nil, nil, errors.Wrap(err, "sharesstorageprovider: error calling ListReceivedSharesRequest")
}
Expand All @@ -691,7 +730,7 @@ func (s *service) resolveReference(ctx context.Context, ref *provider.Reference)
}
for _, rs := range lsRes.Shares {
// match the opaque id
if rs.Share.ResourceId.OpaqueId == ref.ResourceId.OpaqueId {
if rs.Share.Id.OpaqueId == ref.ResourceId.OpaqueId {
return rs, nil, nil
}
}
Expand Down
2 changes: 2 additions & 0 deletions internal/http/services/archiver/handler.go
Original file line number Diff line number Diff line change
Expand Up @@ -143,6 +143,8 @@ func (s *svc) getFiles(ctx context.Context, files, ids []string) ([]string, erro
},
},
})
// FIXME an id based stat on a shared file currently returns the owners path ... not the shared path ... hmm?
// has nothing to do with caching

switch {
case err != nil:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -812,27 +812,6 @@ func (h *Handler) listSharesWithMe(w http.ResponseWriter, r *http.Request) {
return
}

// in a jailed namespace we have to point to the mount point in the users /Shares jail
// to do that we have to list the /Shares jail and use those paths instead of stating the shared resources
// The stat results would start with a path outside the jail and thus be inaccessible

var shareJailInfos []*provider.ResourceInfo

if h.sharePrefix != "/" {
// we only need the path from the share jail for accepted shares
if stateFilter == collaboration.ShareState_SHARE_STATE_ACCEPTED || stateFilter == ocsStateUnknown {
// only log errors. They may happen but we can continue trying to at least list the shares
lcRes, err := client.ListContainer(ctx, &provider.ListContainerRequest{
Ref: &provider.Reference{Path: path.Join(h.getHomeNamespace(revactx.ContextMustGetUser(ctx)), h.sharePrefix)},
})
if err != nil || lcRes.Status.Code != rpc.Code_CODE_OK {
h.logProblems(lcRes.GetStatus(), err, "could not list container, continuing without share jail path info")
} else {
shareJailInfos = lcRes.Infos
}
}
}

shares := make([]*conversions.ShareData, 0, len(lrsRes.GetShares()))

// TODO(refs) filter out "invalid" shares
Expand All @@ -851,10 +830,19 @@ func (h *Handler) listSharesWithMe(w http.ResponseWriter, r *http.Request) {
info = pinfo
} else {
var status *rpc.Status
info, status, err = h.getResourceInfoByID(ctx, client, rs.Share.ResourceId)
// FIXME the ResourceID is the id of the resource, but we want the id of the mount point so we can fetch that path, well we have the mountpoint path in the receivedshare
// first stat mount point
info, status, err = h.getResourceInfoByID(ctx, client, &provider.ResourceId{
StorageId: "a0ca6a90-a365-4782-871e-d44447bbc668",
OpaqueId: rs.Share.ResourceId.OpaqueId,
})
if err != nil || status.Code != rpc.Code_CODE_OK {
h.logProblems(status, err, "could not stat, skipping")
continue
// fallback to unmounted resource
info, status, err = h.getResourceInfoByID(ctx, client, rs.Share.ResourceId)
if err != nil || status.Code != rpc.Code_CODE_OK {
h.logProblems(status, err, "could not stat, skipping")
continue
}
}
}

Expand Down Expand Up @@ -906,11 +894,11 @@ func (h *Handler) listSharesWithMe(w http.ResponseWriter, r *http.Request) {
// Needed because received shares can be jailed in a folder in the users home

if h.sharePrefix != "/" {
// if we have share jail infos use them to build the path
if sji := findMatch(shareJailInfos, rs.Share.ResourceId); sji != nil {
// if we have a mount point use it to build the path
if rs.MountPoint != nil && rs.MountPoint.Path != "" {
// override path with info from share jail
data.FileTarget = path.Join(h.sharePrefix, path.Base(sji.Path))
data.Path = path.Join(h.sharePrefix, path.Base(sji.Path))
data.FileTarget = path.Join(h.sharePrefix, rs.MountPoint.Path)
data.Path = path.Join(h.sharePrefix, rs.MountPoint.Path)
} else {
data.FileTarget = path.Join(h.sharePrefix, path.Base(info.Path))
data.Path = path.Join(h.sharePrefix, path.Base(info.Path))
Expand All @@ -935,15 +923,6 @@ func (h *Handler) listSharesWithMe(w http.ResponseWriter, r *http.Request) {
response.WriteOCSSuccess(w, r, shares)
}

func findMatch(shareJailInfos []*provider.ResourceInfo, id *provider.ResourceId) *provider.ResourceInfo {
for i := range shareJailInfos {
if shareJailInfos[i].Id != nil && shareJailInfos[i].Id.StorageId == id.StorageId && shareJailInfos[i].Id.OpaqueId == id.OpaqueId {
return shareJailInfos[i]
}
}
return nil
}

func (h *Handler) listSharesWithOthers(w http.ResponseWriter, r *http.Request) {
shares := make([]*conversions.ShareData, 0)

Expand Down
Loading