From 85a91aa55506d80c9e3eac1852c13de94041e2cf Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Wed, 29 Nov 2023 12:46:26 +0800
Subject: [PATCH 01/25] feat: dangling node detection for graph.Memory

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go            |  45 +++++++++
 content/oci/oci_test.go       |  93 +++++++++++++++++++
 content/oci/storage.go        |  11 ++-
 internal/graph/memory.go      |  43 ++++++---
 internal/graph/memory_test.go | 167 ++++++++++++++++------------------
 5 files changed, 253 insertions(+), 106 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 5c584f86..64d62ee0 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -23,6 +23,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"io/fs"
 	"os"
 	"path/filepath"
 	"sync"
@@ -454,6 +455,50 @@ func (s *Store) writeIndexFile() error {
 	return os.WriteFile(s.indexPath, indexJSON, 0666)
 }
 
+// GC removes garbage from Store. The garbage to be cleaned up includes:
+//   - unreferenced (dangling) blobs in Store which have no predecessors
+//   - garbage blobs in the storage whose metadata is not stored in Store
+func (s *Store) GC(ctx context.Context) error {
+	s.sync.Lock()
+	defer s.sync.Unlock()
+
+	// clean up dangling layers in Store
+	danglings := s.graph.GetDanglingLayers()
+	for _, desc := range danglings {
+		// do not remove existing manifests in the index
+		if _, err := s.tagResolver.Resolve(ctx, string(desc.Digest)); err == errdef.ErrNotFound {
+			// remove the blob and its metadata from the Store
+			// TODO: s.delete returns two values; a dangling tree is not deleted properly
+			if _, err := s.delete(ctx, desc); err != nil {
+				return err
+			}
+		}
+	}
+
+	// clean up garbage blobs in the storage
+	rootpath := filepath.Join(s.root, "blobs")
+	return filepath.Walk(rootpath, func(path string, info fs.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		// skip the directories
+		if !info.IsDir() {
+			alg := filepath.Base(filepath.Dir(path))
+			blobDigest, err := digest.Parse(fmt.Sprintf("%s:%s", alg, info.Name()))
+			if err != nil {
+				return err
+			}
+			if exists := s.graph.Exists(blobDigest); !exists {
+				// remove the blob from storage if it does not exist in Store
+				if err := s.storage.deleteByDigest(ctx, blobDigest); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	})
+}
+
 // unsafeStore is used to bypass lock restrictions in Delete.
 type unsafeStore struct {
 	*Store
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 0bdfb4a4..c4cdc047 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -2844,6 +2844,99 @@ func TestStore_UntagErrorPath(t *testing.T) {
 	}
 }
 
+func TestStore_GC(t *testing.T) {
+	tempDir := t.TempDir()
+	s, err := New(tempDir)
+	if err != nil {
+		t.Fatal("New() error =", err)
+	}
+	ctx := context.Background()
+
+	// generate test content
+	var blobs [][]byte
+	var descs []ocispec.Descriptor
+	appendBlob := func(mediaType string, blob []byte) {
+		blobs = append(blobs, blob)
+		descs = append(descs, ocispec.Descriptor{
+			MediaType: mediaType,
+			Digest:    digest.FromBytes(blob),
+			Size:      int64(len(blob)),
+		})
+	}
+	generateManifest := func(config ocispec.Descriptor, layers ...ocispec.Descriptor) {
+		manifest := ocispec.Manifest{
+			Config: config,
+			Layers: layers,
+		}
+		manifestJSON, err := json.Marshal(manifest)
+		if err != nil {
+			t.Fatal(err)
+		}
+		appendBlob(ocispec.MediaTypeImageManifest, manifestJSON)
+	}
+
+	appendBlob(ocispec.MediaTypeImageConfig, []byte("config"))         // Blob 0
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("blob"))            // Blob 1
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer"))  // Blob 2, dangling layer
+	generateManifest(descs[0], descs[1])                               // Blob 3, valid manifest
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 1")) // Blob 4, garbage layer 1
+	generateManifest(descs[0], descs[4])                               // Blob 5, garbage manifest 1
+	appendBlob(ocispec.MediaTypeImageConfig, []byte("garbage config")) // Blob 6, garbage config
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 2")) // Blob 7, garbage layer 2
+	generateManifest(descs[6], descs[7])                               // Blob 8, garbage manifest 2
+
+	// push blobs[0] - blobs[3] into s
+	eg, egCtx := errgroup.WithContext(ctx)
+	for i := 0; i <= 3; i++ {
+		eg.Go(func(i int) func() error {
+			return func() error {
+				err := s.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
+				if err != nil {
+					return fmt.Errorf("failed to push test content to src: %d: %v", i, err)
+				}
+				return nil
+			}
+		}(i))
+	}
+	if err := eg.Wait(); err != nil {
+		t.Fatal(err)
+	}
+
+	// push blobs[4] - blobs[8] into s.storage, making them garbage as their metadata
+	// doesn't exist in s
+	for i := 4; i < len(blobs); i++ {
+		eg.Go(func(i int) func() error {
+			return func() error {
+				err := s.storage.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
+				if err != nil {
+					return fmt.Errorf("failed to push test content to src: %d: %v", i, err)
+				}
+				return nil
+			}
+		}(i))
+	}
+	if err := eg.Wait(); err != nil {
+		t.Fatal(err)
+	}
+
+	// perform GC
+	if err = s.GC(egCtx); err != nil {
+		t.Fatal(err)
+	}
+
+	// verify existence
+	wantExistence := []bool{true, true, false, true, false, false, false, false, false}
+	for i, wantValue := range wantExistence {
+		exists, err := s.Exists(egCtx, descs[i])
+		if err != nil {
+			t.Fatal(err)
+		}
+		if exists != wantValue {
+			t.Fatalf("want existence %d to be %v, got %v", i, wantValue, exists)
+		}
+	}
+}
+
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {
 	if len(actual) != len(expected) {
 		return false
diff --git a/content/oci/storage.go b/content/oci/storage.go
index efb9f3d8..a581cd1f 100644
--- a/content/oci/storage.go
+++ b/content/oci/storage.go
@@ -25,6 +25,7 @@ import (
 	"path/filepath"
 	"sync"
 
+	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/errdef"
 	"oras.land/oras-go/v2/internal/ioutil"
@@ -109,15 +110,19 @@ func (s *Storage) Push(_ context.Context, expected ocispec.Descriptor, content i
 
 // Delete removes the target from the system.
 func (s *Storage) Delete(ctx context.Context, target ocispec.Descriptor) error {
-	path, err := blobPath(target.Digest)
+	return s.deleteByDigest(ctx, target.Digest)
+}
+
+func (s *Storage) deleteByDigest(ctx context.Context, digest digest.Digest) error {
+	path, err := blobPath(digest)
 	if err != nil {
-		return fmt.Errorf("%s: %s: %w", target.Digest, target.MediaType, errdef.ErrInvalidDigest)
+		return fmt.Errorf("%s: %w", digest, errdef.ErrInvalidDigest)
 	}
 	targetPath := filepath.Join(s.root, path)
 	err = os.Remove(targetPath)
 	if err != nil {
 		if errors.Is(err, fs.ErrNotExist) {
-			return fmt.Errorf("%s: %s: %w", target.Digest, target.MediaType, errdef.ErrNotFound)
+			return fmt.Errorf("%s: %w", digest, errdef.ErrNotFound)
 		}
 		return err
 	}
diff --git a/internal/graph/memory.go b/internal/graph/memory.go
index b93df83e..c9655335 100644
--- a/internal/graph/memory.go
+++ b/internal/graph/memory.go
@@ -20,11 +20,11 @@ import (
 	"errors"
 	"sync"
 
+	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/content"
 	"oras.land/oras-go/v2/errdef"
 	"oras.land/oras-go/v2/internal/container/set"
-	"oras.land/oras-go/v2/internal/descriptor"
 	"oras.land/oras-go/v2/internal/status"
 	"oras.land/oras-go/v2/internal/syncutil"
 )
@@ -35,7 +35,7 @@ type Memory struct {
 	//  1. a node exists in Memory.nodes if and only if it exists in the memory
 	//  2. Memory.nodes saves the ocispec.Descriptor map keys, which are used by
 	//    the other fields.
-	nodes map[descriptor.Descriptor]ocispec.Descriptor
+	nodes map[digest.Digest]ocispec.Descriptor
 
 	// predecessors has the following properties and behaviors:
 	//  1. a node exists in Memory.predecessors if it has at least one predecessor
@@ -43,14 +43,14 @@ type Memory struct {
 	//    the memory.
 	//  2. a node does not exist in Memory.predecessors, if it doesn't have any predecessors
 	//    in the memory.
-	predecessors map[descriptor.Descriptor]set.Set[descriptor.Descriptor]
+	predecessors map[digest.Digest]set.Set[digest.Digest]
 
 	// successors has the following properties and behaviors:
 	//  1. a node exists in Memory.successors if and only if it exists in the memory.
 	//  2. a node's entry in Memory.successors is always consistent with the actual
 	//    content of the node, regardless of whether or not each successor exists
 	//    in the memory.
-	successors map[descriptor.Descriptor]set.Set[descriptor.Descriptor]
+	successors map[digest.Digest]set.Set[digest.Digest]
 
 	lock sync.RWMutex
 }
@@ -58,9 +58,9 @@ type Memory struct {
 // NewMemory creates a new memory PredecessorFinder.
 func NewMemory() *Memory {
 	return &Memory{
-		nodes:        make(map[descriptor.Descriptor]ocispec.Descriptor),
-		predecessors: make(map[descriptor.Descriptor]set.Set[descriptor.Descriptor]),
-		successors:   make(map[descriptor.Descriptor]set.Set[descriptor.Descriptor]),
+		nodes:        make(map[digest.Digest]ocispec.Descriptor),
+		predecessors: make(map[digest.Digest]set.Set[digest.Digest]),
+		successors:   make(map[digest.Digest]set.Set[digest.Digest]),
 	}
 }
 
@@ -107,7 +107,7 @@ func (m *Memory) Predecessors(_ context.Context, node ocispec.Descriptor) ([]oci
 	m.lock.RLock()
 	defer m.lock.RUnlock()
 
-	key := descriptor.FromOCI(node)
+	key := node.Digest
 	set, exists := m.predecessors[key]
 	if !exists {
 		return nil, nil
@@ -125,7 +125,7 @@ func (m *Memory) Remove(node ocispec.Descriptor) []ocispec.Descriptor {
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
-	nodeKey := descriptor.FromOCI(node)
+	nodeKey := node.Digest
 	var danglings []ocispec.Descriptor
 	// remove the node from its successors' predecessor list
 	for successorKey := range m.successors[nodeKey] {
@@ -157,22 +157,39 @@ func (m *Memory) index(ctx context.Context, fetcher content.Fetcher, node ocispe
 	defer m.lock.Unlock()
 
 	// index the node
-	nodeKey := descriptor.FromOCI(node)
+	nodeKey := node.Digest
 	m.nodes[nodeKey] = node
 
 	// for each successor, put it into the node's successors list, and
 	// put the node into the successor's predecessors list
-	successorSet := set.New[descriptor.Descriptor]()
+	successorSet := set.New[digest.Digest]()
 	m.successors[nodeKey] = successorSet
 	for _, successor := range successors {
-		successorKey := descriptor.FromOCI(successor)
+		successorKey := successor.Digest
 		successorSet.Add(successorKey)
 		predecessorSet, exists := m.predecessors[successorKey]
 		if !exists {
-			predecessorSet = set.New[descriptor.Descriptor]()
+			predecessorSet = set.New[digest.Digest]()
 			m.predecessors[successorKey] = predecessorSet
 		}
 		predecessorSet.Add(nodeKey)
 	}
 	return successors, nil
 }
+
+// Exists returns if a blob denoted by its digest exists in the memory.
+func (m *Memory) Exists(dgst digest.Digest) bool {
+	_, exists := m.nodes[dgst]
+	return exists
+}
+
+// GetDanglingLayers returns the dangling (unreferenced) layer nodes in the memory.
+func (m *Memory) GetDanglingLayers() []ocispec.Descriptor {
+	var danglings []ocispec.Descriptor
+	for key, desc := range m.nodes {
+		if _, exist := m.predecessors[key]; !exist {
+			danglings = append(danglings, desc)
+		}
+	}
+	return danglings
+}
diff --git a/internal/graph/memory_test.go b/internal/graph/memory_test.go
index 9b5bab33..de5eed0c 100644
--- a/internal/graph/memory_test.go
+++ b/internal/graph/memory_test.go
@@ -26,7 +26,6 @@ import (
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/internal/cas"
-	"oras.land/oras-go/v2/internal/descriptor"
 )
 
 // +------------------------------+
@@ -104,19 +103,14 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 		t.Errorf("testFetcher.Fetch() = %v, want %v", got, blobs[4])
 	}
 
-	nodeKeyA := descriptor.FromOCI(descA)
-	nodeKeyB := descriptor.FromOCI(descB)
-	nodeKeyC := descriptor.FromOCI(descC)
-	nodeKeyD := descriptor.FromOCI(descD)
-
 	// index and check the information of node D
 	testMemory.Index(ctx, testFetcher, descD)
 	// 1. verify its existence in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyD]; !exists {
+	if _, exists := testMemory.nodes[descD.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "D")
 	}
 	// 2. verify that the entry of D exists in testMemory.successors and it's empty
-	successorsD, exists := testMemory.successors[nodeKeyD]
+	successorsD, exists := testMemory.successors[descD.Digest]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "D")
 	}
@@ -127,7 +121,7 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 		t.Errorf("successors of %s should be empty", "D")
 	}
 	// 3. there should be no entry of D in testMemory.predecessors yet
-	_, exists = testMemory.predecessors[nodeKeyD]
+	_, exists = testMemory.predecessors[descD.Digest]
 	if exists {
 		t.Errorf("predecessor entry of %s should not exist yet", "D")
 	}
@@ -135,11 +129,11 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 	// index and check the information of node C
 	testMemory.Index(ctx, testFetcher, descC)
 	// 1. verify its existence in memory.nodes
-	if _, exists := testMemory.nodes[nodeKeyC]; !exists {
+	if _, exists := testMemory.nodes[descC.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "C")
 	}
 	// 2. verify that the entry of C exists in testMemory.successors and it's empty
-	successorsC, exists := testMemory.successors[nodeKeyC]
+	successorsC, exists := testMemory.successors[descC.Digest]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "C")
 	}
@@ -150,7 +144,7 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 		t.Errorf("successors of %s should be empty", "C")
 	}
 	// 3. there should be no entry of C in testMemory.predecessors yet
-	_, exists = testMemory.predecessors[nodeKeyC]
+	_, exists = testMemory.predecessors[descC.Digest]
 	if exists {
 		t.Errorf("predecessor entry of %s should not exist yet", "C")
 	}
@@ -158,48 +152,48 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 	// index and check the information of node A
 	testMemory.Index(ctx, testFetcher, descA)
 	// 1. verify its existence in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyA]; !exists {
+	if _, exists := testMemory.nodes[descA.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "A")
 	}
 	// 2. verify that the entry of A exists in testMemory.successors and it contains
 	// node B and node D
-	successorsA, exists := testMemory.successors[nodeKeyA]
+	successorsA, exists := testMemory.successors[descA.Digest]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "A")
 	}
 	if successorsA == nil {
 		t.Errorf("successors of %s should be a set, not nil", "A")
 	}
-	if !successorsA.Contains(nodeKeyB) {
+	if !successorsA.Contains(descB.Digest) {
 		t.Errorf("successors of %s should contain %s", "A", "B")
 	}
-	if !successorsA.Contains(nodeKeyD) {
+	if !successorsA.Contains(descD.Digest) {
 		t.Errorf("successors of %s should contain %s", "A", "D")
 	}
 	// 3. verify that node A exists in the predecessors lists of its successors.
 	// there should be an entry of D in testMemory.predecessors by now and it
 	// should contain A but not B
-	predecessorsD, exists := testMemory.predecessors[nodeKeyD]
+	predecessorsD, exists := testMemory.predecessors[descD.Digest]
 	if !exists {
 		t.Errorf("predecessor entry of %s should exist by now", "D")
 	}
-	if !predecessorsD.Contains(nodeKeyA) {
+	if !predecessorsD.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "D", "A")
 	}
-	if predecessorsD.Contains(nodeKeyB) {
+	if predecessorsD.Contains(descB.Digest) {
 		t.Errorf("predecessors of %s should not contain %s yet", "D", "B")
 	}
 	// there should be an entry of B in testMemory.predecessors now
 	// and it should contain A
-	predecessorsB, exists := testMemory.predecessors[nodeKeyB]
+	predecessorsB, exists := testMemory.predecessors[descB.Digest]
 	if !exists {
 		t.Errorf("predecessor entry of %s should exist by now", "B")
 	}
-	if !predecessorsB.Contains(nodeKeyA) {
+	if !predecessorsB.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "B", "A")
 	}
 	// 4. there should be no entry of A in testMemory.predecessors
-	_, exists = testMemory.predecessors[nodeKeyA]
+	_, exists = testMemory.predecessors[descA.Digest]
 	if exists {
 		t.Errorf("predecessor entry of %s should not exist", "A")
 	}
@@ -207,100 +201,100 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 	// index and check the information of node B
 	testMemory.Index(ctx, testFetcher, descB)
 	// 1. verify its existence in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyB]; !exists {
+	if _, exists := testMemory.nodes[descB.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "B")
 	}
 	// 2. verify that the entry of B exists in testMemory.successors and it contains
 	// node C and node D
-	successorsB, exists := testMemory.successors[nodeKeyB]
+	successorsB, exists := testMemory.successors[descB.Digest]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "B")
 	}
 	if successorsB == nil {
 		t.Errorf("successors of %s should be a set, not nil", "B")
 	}
-	if !successorsB.Contains(nodeKeyC) {
+	if !successorsB.Contains(descC.Digest) {
 		t.Errorf("successors of %s should contain %s", "B", "C")
 	}
-	if !successorsB.Contains(nodeKeyD) {
+	if !successorsB.Contains(descD.Digest) {
 		t.Errorf("successors of %s should contain %s", "B", "D")
 	}
 	// 3. verify that node B exists in the predecessors lists of its successors.
 	// there should be an entry of C in testMemory.predecessors by now
 	// and it should contain B
-	predecessorsC, exists := testMemory.predecessors[nodeKeyC]
+	predecessorsC, exists := testMemory.predecessors[descC.Digest]
 	if !exists {
 		t.Errorf("predecessor entry of %s should exist by now", "C")
 	}
-	if !predecessorsC.Contains(nodeKeyB) {
+	if !predecessorsC.Contains(descB.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "C", "B")
 	}
 	// predecessors of D should have been updated now to have node A and B
-	if !predecessorsD.Contains(nodeKeyB) {
+	if !predecessorsD.Contains(descB.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "D", "B")
 	}
-	if !predecessorsD.Contains(nodeKeyA) {
+	if !predecessorsD.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "D", "A")
 	}
 
 	// remove node B and check the stored information
 	testMemory.Remove(descB)
 	// 1. verify that node B no longer exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyB]; exists {
+	if _, exists := testMemory.nodes[descB.Digest]; exists {
 		t.Errorf("nodes entry of %s should no longer exist", "B")
 	}
 	// 2. verify B' predecessors info: B's entry in testMemory.predecessors should
 	// still exist, since its predecessor A still exists
-	predecessorsB, exists = testMemory.predecessors[nodeKeyB]
+	predecessorsB, exists = testMemory.predecessors[descB.Digest]
 	if !exists {
 		t.Errorf("testDeletableMemory.predecessors should still contain the entry of %s", "B")
 	}
-	if !predecessorsB.Contains(nodeKeyA) {
+	if !predecessorsB.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should still contain %s", "B", "A")
 	}
 	// 3. verify B' successors info: B's entry in testMemory.successors should no
 	// longer exist
-	if _, exists := testMemory.successors[nodeKeyB]; exists {
+	if _, exists := testMemory.successors[descB.Digest]; exists {
 		t.Errorf("testDeletableMemory.successors should not contain the entry of %s", "B")
 	}
 	// 4. verify B' predecessors' successors info: B should still exist in A's
 	// successors
-	if !successorsA.Contains(nodeKeyB) {
+	if !successorsA.Contains(descB.Digest) {
 		t.Errorf("successors of %s should still contain %s", "A", "B")
 	}
 	// 5. verify B' successors' predecessors info: C's entry in testMemory.predecessors
 	// should no longer exist, since C's only predecessor B is already deleted
-	if _, exists = testMemory.predecessors[nodeKeyC]; exists {
+	if _, exists = testMemory.predecessors[descC.Digest]; exists {
 		t.Errorf("predecessor entry of %s should no longer exist by now, since all its predecessors have been deleted", "C")
 	}
 	// B should no longer exist in D's predecessors
-	if predecessorsD.Contains(nodeKeyB) {
+	if predecessorsD.Contains(descB.Digest) {
 		t.Errorf("predecessors of %s should not contain %s", "D", "B")
 	}
 	// but A still exists in D's predecessors
-	if !predecessorsD.Contains(nodeKeyA) {
+	if !predecessorsD.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should still contain %s", "D", "A")
 	}
 
 	// remove node A and check the stored information
 	testMemory.Remove(descA)
 	// 1. verify that node A no longer exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyA]; exists {
+	if _, exists := testMemory.nodes[descA.Digest]; exists {
 		t.Errorf("nodes entry of %s should no longer exist", "A")
 	}
 	// 2. verify A' successors info: A's entry in testMemory.successors should no
 	// longer exist
-	if _, exists := testMemory.successors[nodeKeyA]; exists {
+	if _, exists := testMemory.successors[descA.Digest]; exists {
 		t.Errorf("testDeletableMemory.successors should not contain the entry of %s", "A")
 	}
 	// 3. verify A' successors' predecessors info: D's entry in testMemory.predecessors
 	// should no longer exist, since all predecessors of D are already deleted
-	if _, exists = testMemory.predecessors[nodeKeyD]; exists {
+	if _, exists = testMemory.predecessors[descD.Digest]; exists {
 		t.Errorf("predecessor entry of %s should no longer exist by now, since all its predecessors have been deleted", "D")
 	}
 	// B's entry in testMemory.predecessors should no longer exist, since B's only
 	// predecessor A is already deleted
-	if _, exists = testMemory.predecessors[nodeKeyB]; exists {
+	if _, exists = testMemory.predecessors[descB.Digest]; exists {
 		t.Errorf("predecessor entry of %s should no longer exist by now, since all its predecessors have been deleted", "B")
 	}
 }
@@ -392,106 +386,99 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 		t.Errorf("testFetcher.Fetch() = %v, want %v", got, blobs[4])
 	}
 
-	nodeKeyA := descriptor.FromOCI(descA)
-	nodeKeyB := descriptor.FromOCI(descB)
-	nodeKeyC := descriptor.FromOCI(descC)
-	nodeKeyD := descriptor.FromOCI(descD)
-	nodeKeyE := descriptor.FromOCI(descE)
-	nodeKeyF := descriptor.FromOCI(descF)
-
 	// index node A into testMemory using IndexAll
 	testMemory.IndexAll(ctx, testFetcher, descA)
 
 	// check the information of node A
 	// 1. verify that node A exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyA]; !exists {
+	if _, exists := testMemory.nodes[descA.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "A")
 	}
 	// 2. verify that there is no entry of A in predecessors
-	if _, exists := testMemory.predecessors[nodeKeyA]; exists {
+	if _, exists := testMemory.predecessors[descA.Digest]; exists {
 		t.Errorf("there should be no entry of %s in predecessors", "A")
 	}
 	// 3. verify that A has successors B, C, D
-	successorsA, exists := testMemory.successors[nodeKeyA]
+	successorsA, exists := testMemory.successors[descA.Digest]
 	if !exists {
 		t.Errorf("there should be an entry of %s in successors", "A")
 	}
-	if !successorsA.Contains(nodeKeyB) {
+	if !successorsA.Contains(descB.Digest) {
 		t.Errorf("successors of %s should contain %s", "A", "B")
 	}
-	if !successorsA.Contains(nodeKeyC) {
+	if !successorsA.Contains(descC.Digest) {
 		t.Errorf("successors of %s should contain %s", "A", "C")
 	}
-	if !successorsA.Contains(nodeKeyD) {
+	if !successorsA.Contains(descD.Digest) {
 		t.Errorf("successors of %s should contain %s", "A", "D")
 	}
 
 	// check the information of node B
 	// 1. verify that node B exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyB]; !exists {
+	if _, exists := testMemory.nodes[descB.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "B")
 	}
 	// 2. verify that B has node A in its predecessors
-	predecessorsB := testMemory.predecessors[nodeKeyB]
-	if !predecessorsB.Contains(nodeKeyA) {
+	predecessorsB := testMemory.predecessors[descB.Digest]
+	if !predecessorsB.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "B", "A")
 	}
 	// 3. verify that B has node E in its successors
-	successorsB := testMemory.successors[nodeKeyB]
-	if !successorsB.Contains(nodeKeyE) {
+	successorsB := testMemory.successors[descB.Digest]
+	if !successorsB.Contains(descE.Digest) {
 		t.Errorf("successors of %s should contain %s", "B", "E")
 	}
 
 	// check the information of node C
 	// 1. verify that node C exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyC]; !exists {
+	if _, exists := testMemory.nodes[descC.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "C")
 	}
 	// 2. verify that C has node A in its predecessors
-	predecessorsC := testMemory.predecessors[nodeKeyC]
-	if !predecessorsC.Contains(nodeKeyA) {
+	predecessorsC := testMemory.predecessors[descC.Digest]
+	if !predecessorsC.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "C", "A")
 	}
 	// 3. verify that C has node E and F in its successors
-	successorsC := testMemory.successors[nodeKeyC]
-	if !successorsC.Contains(nodeKeyE) {
+	successorsC := testMemory.successors[descC.Digest]
+	if !successorsC.Contains(descE.Digest) {
 		t.Errorf("successors of %s should contain %s", "C", "E")
 	}
-	if !successorsC.Contains(nodeKeyF) {
+	if !successorsC.Contains(descF.Digest) {
 		t.Errorf("successors of %s should contain %s", "C", "F")
 	}
 
 	// check the information of node D
 	// 1. verify that node D exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyD]; !exists {
+	if _, exists := testMemory.nodes[descD.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "D")
 	}
 	// 2. verify that D has node A in its predecessors
-	predecessorsD := testMemory.predecessors[nodeKeyD]
-	if !predecessorsD.Contains(nodeKeyA) {
+	predecessorsD := testMemory.predecessors[descD.Digest]
+	if !predecessorsD.Contains(descA.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "D", "A")
 	}
 	// 3. verify that D has node F in its successors
-	successorsD := testMemory.successors[nodeKeyD]
-	if !successorsD.Contains(nodeKeyF) {
+	successorsD := testMemory.successors[descD.Digest]
+	if !successorsD.Contains(descF.Digest) {
 		t.Errorf("successors of %s should contain %s", "D", "F")
 	}
 
 	// check the information of node E
 	// 1. verify that node E exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyE]; !exists {
+	if _, exists := testMemory.nodes[descE.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "E")
 	}
 	// 2. verify that E has node B and C in its predecessors
-	predecessorsE := testMemory.predecessors[nodeKeyE]
-	if !predecessorsE.Contains(nodeKeyB) {
+	predecessorsE := testMemory.predecessors[descE.Digest]
+	if !predecessorsE.Contains(descB.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "E", "B")
 	}
-	if !predecessorsE.Contains(nodeKeyC) {
+	if !predecessorsE.Contains(descC.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "E", "C")
 	}
 	// 3. verify that E has an entry in successors and it's empty
-	successorsE, exists := testMemory.successors[nodeKeyE]
+	successorsE, exists := testMemory.successors[descE.Digest]
 	if !exists {
 		t.Errorf("entry %s should exist in testMemory.successors", "E")
 	}
@@ -504,19 +491,19 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 
 	// check the information of node F
 	// 1. verify that node F exists in testMemory.nodes
-	if _, exists := testMemory.nodes[nodeKeyF]; !exists {
+	if _, exists := testMemory.nodes[descF.Digest]; !exists {
 		t.Errorf("nodes entry of %s should exist", "F")
 	}
 	// 2. verify that F has node C and D in its predecessors
-	predecessorsF := testMemory.predecessors[nodeKeyF]
-	if !predecessorsF.Contains(nodeKeyC) {
+	predecessorsF := testMemory.predecessors[descF.Digest]
+	if !predecessorsF.Contains(descC.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "F", "C")
 	}
-	if !predecessorsF.Contains(nodeKeyD) {
+	if !predecessorsF.Contains(descD.Digest) {
 		t.Errorf("predecessors of %s should contain %s", "F", "D")
 	}
 	// 3. verify that F has an entry in successors and it's empty
-	successorsF, exists := testMemory.successors[nodeKeyF]
+	successorsF, exists := testMemory.successors[descF.Digest]
 	if !exists {
 		t.Errorf("entry %s should exist in testMemory.successors", "F")
 	}
@@ -557,34 +544,34 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 
 	// remove node C and check the stored information
 	testMemory.Remove(descC)
-	if predecessorsE.Contains(nodeKeyC) {
+	if predecessorsE.Contains(descC.Digest) {
 		t.Errorf("predecessors of %s should not contain %s", "E", "C")
 	}
-	if predecessorsF.Contains(nodeKeyC) {
+	if predecessorsF.Contains(descC.Digest) {
 		t.Errorf("predecessors of %s should not contain %s", "F", "C")
 	}
-	if !successorsA.Contains(nodeKeyC) {
+	if !successorsA.Contains(descC.Digest) {
 		t.Errorf("successors of %s should still contain %s", "A", "C")
 	}
-	if _, exists := testMemory.successors[nodeKeyC]; exists {
+	if _, exists := testMemory.successors[descC.Digest]; exists {
 		t.Errorf("testMemory.successors should not contain the entry of %s", "C")
 	}
-	if _, exists := testMemory.predecessors[nodeKeyC]; !exists {
+	if _, exists := testMemory.predecessors[descC.Digest]; !exists {
 		t.Errorf("entry %s in predecessors should still exists since it still has at least one predecessor node present", "C")
 	}
 
 	// remove node A and check the stored information
 	testMemory.Remove(descA)
-	if _, exists := testMemory.predecessors[nodeKeyB]; exists {
+	if _, exists := testMemory.predecessors[descB.Digest]; exists {
 		t.Errorf("entry %s in predecessors should no longer exists", "B")
 	}
-	if _, exists := testMemory.predecessors[nodeKeyC]; exists {
+	if _, exists := testMemory.predecessors[descC.Digest]; exists {
 		t.Errorf("entry %s in predecessors should no longer exists", "C")
 	}
-	if _, exists := testMemory.predecessors[nodeKeyD]; exists {
+	if _, exists := testMemory.predecessors[descD.Digest]; exists {
 		t.Errorf("entry %s in predecessors should no longer exists", "D")
 	}
-	if _, exists := testMemory.successors[nodeKeyA]; exists {
+	if _, exists := testMemory.successors[descA.Digest]; exists {
 		t.Errorf("testDeletableMemory.successors should not contain the entry of %s", "A")
 	}
 

From 07e89dfd08e70baa04abd6646fa4d6276ad87feb Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 2 Jan 2024 14:31:28 +0800
Subject: [PATCH 02/25] resolved dangling tree issue

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      | 17 +++++++-----
 content/oci/oci_test.go | 60 ++++++++++++++++++++++++++++++-----------
 2 files changed, 55 insertions(+), 22 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 64d62ee0..4e10311f 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -169,13 +169,17 @@ func (s *Store) Delete(ctx context.Context, target ocispec.Descriptor) error {
 	s.sync.Lock()
 	defer s.sync.Unlock()
 
+	return s.delete(ctx, target, s.AutoGC, s.AutoDeleteReferrers)
+}
+
+func (s *Store) delete(ctx context.Context, target ocispec.Descriptor, autoGC bool, autoDeleteReferrers bool) error {
 	deleteQueue := []ocispec.Descriptor{target}
 	for len(deleteQueue) > 0 {
 		head := deleteQueue[0]
 		deleteQueue = deleteQueue[1:]
 
 		// get referrers if applicable
-		if s.AutoDeleteReferrers && descriptor.IsManifest(head) {
+		if autoDeleteReferrers && descriptor.IsManifest(head) {
 			referrers, err := registry.Referrers(ctx, &unsafeStore{s}, head, "")
 			if err != nil {
 				return err
@@ -184,11 +188,11 @@ func (s *Store) Delete(ctx context.Context, target ocispec.Descriptor) error {
 		}
 
 		// delete the head of queue
-		danglings, err := s.delete(ctx, head)
+		danglings, err := s.deleteNode(ctx, head)
 		if err != nil {
 			return err
 		}
-		if s.AutoGC {
+		if autoGC {
 			for _, d := range danglings {
 				// do not delete existing manifests in tagResolver
 				_, err = s.tagResolver.Resolve(ctx, string(d.Digest))
@@ -202,8 +206,8 @@ func (s *Store) Delete(ctx context.Context, target ocispec.Descriptor) error {
 	return nil
 }
 
-// delete deletes one node and returns the dangling nodes caused by the delete.
-func (s *Store) delete(ctx context.Context, target ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+// deleteNode deletes one node and returns the dangling nodes caused by the delete.
+func (s *Store) deleteNode(ctx context.Context, target ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 	resolvers := s.tagResolver.Map()
 	untagged := false
 	for reference, desc := range resolvers {
@@ -468,8 +472,7 @@ func (s *Store) GC(ctx context.Context) error {
 		// do not remove existing manifests in the index
 		if _, err := s.tagResolver.Resolve(ctx, string(desc.Digest)); err == errdef.ErrNotFound {
 			// remove the blob and its metadata from the Store
-			// TODO: s.delete returns two values; a dangling tree is not deleted properly
-			if _, err := s.delete(ctx, desc); err != nil {
+			if err := s.delete(ctx, desc, true, true); err != nil {
 				return err
 			}
 		}
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index c4cdc047..1dcc557b 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -2874,20 +2874,44 @@ func TestStore_GC(t *testing.T) {
 		}
 		appendBlob(ocispec.MediaTypeImageManifest, manifestJSON)
 	}
+	generateImageIndex := func(manifests ...ocispec.Descriptor) {
+		index := ocispec.Index{
+			Manifests: manifests,
+		}
+		indexJSON, err := json.Marshal(index)
+		if err != nil {
+			t.Fatal(err)
+		}
+		appendBlob(ocispec.MediaTypeImageIndex, indexJSON)
+	}
+	generateArtifactManifest := func(blobs ...ocispec.Descriptor) {
+		var manifest spec.Artifact
+		manifest.Blobs = append(manifest.Blobs, blobs...)
+		manifestJSON, err := json.Marshal(manifest)
+		if err != nil {
+			t.Fatal(err)
+		}
+		appendBlob(spec.MediaTypeArtifactManifest, manifestJSON)
+	}
 
-	appendBlob(ocispec.MediaTypeImageConfig, []byte("config"))         // Blob 0
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("blob"))            // Blob 1
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer"))  // Blob 2, dangling layer
-	generateManifest(descs[0], descs[1])                               // Blob 3, valid manifest
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 1")) // Blob 4, garbage layer 1
-	generateManifest(descs[0], descs[4])                               // Blob 5, garbage manifest 1
-	appendBlob(ocispec.MediaTypeImageConfig, []byte("garbage config")) // Blob 6, garbage config
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 2")) // Blob 7, garbage layer 2
-	generateManifest(descs[6], descs[7])                               // Blob 8, garbage manifest 2
-
-	// push blobs[0] - blobs[3] into s
+	appendBlob(ocispec.MediaTypeImageConfig, []byte("config"))          // Blob 0
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("blob"))             // Blob 1
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer"))   // Blob 2, dangling layer
+	generateManifest(descs[0], descs[1])                                // Blob 3, valid manifest
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 2")) // Blob 4, dangling layer
+	generateArtifactManifest(descs[4])                                  // blob 5, dangling artifact
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 3")) // Blob 6, dangling layer
+	generateArtifactManifest(descs[6])                                  // blob 7, dangling artifact
+	generateImageIndex(descs[7], descs[5])                              // blob 8, dangling image index
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 1"))  // Blob 9, garbage layer 1
+	generateManifest(descs[0], descs[4])                                // Blob 10, garbage manifest 1
+	appendBlob(ocispec.MediaTypeImageConfig, []byte("garbage config"))  // Blob 11, garbage config
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 2"))  // Blob 12, garbage layer 2
+	generateManifest(descs[6], descs[7])                                // Blob 13, garbage manifest 2
+
+	// push blobs 0 - blobs 8 into s
 	eg, egCtx := errgroup.WithContext(ctx)
-	for i := 0; i <= 3; i++ {
+	for i := 0; i <= 8; i++ {
 		eg.Go(func(i int) func() error {
 			return func() error {
 				err := s.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
@@ -2902,9 +2926,15 @@ func TestStore_GC(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// push blobs[4] - blobs[8] into s.storage, making them garbage as their metadata
+	// remove blobs 4 - blobs 8 from index, making them a dangling tree
+	for i := 4; i <= 8; i++ {
+		s.tagResolver.Untag(string(descs[i].Digest))
+	}
+	s.SaveIndex()
+
+	// push blobs 9 - blobs 13 into s.storage, making them garbage as their metadata
 	// doesn't exist in s
-	for i := 4; i < len(blobs); i++ {
+	for i := 9; i < len(blobs); i++ {
 		eg.Go(func(i int) func() error {
 			return func() error {
 				err := s.storage.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
@@ -2925,7 +2955,7 @@ func TestStore_GC(t *testing.T) {
 	}
 
 	// verify existence
-	wantExistence := []bool{true, true, false, true, false, false, false, false, false}
+	wantExistence := []bool{true, true, false, true, false, false, false, false, false, false, false, false, false, false}
 	for i, wantValue := range wantExistence {
 		exists, err := s.Exists(egCtx, descs[i])
 		if err != nil {

From c921137360836270511e7eafc3d35fc1c7e109c2 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Wed, 3 Jan 2024 09:52:35 +0800
Subject: [PATCH 03/25] removed storage changes

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go     | 23 ++++++++++++++---------
 content/oci/storage.go | 11 +++--------
 2 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 4e10311f..916a71e5 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -485,17 +485,22 @@ func (s *Store) GC(ctx context.Context) error {
 			return err
 		}
 		// skip the directories
-		if !info.IsDir() {
-			alg := filepath.Base(filepath.Dir(path))
-			blobDigest, err := digest.Parse(fmt.Sprintf("%s:%s", alg, info.Name()))
+		if info.IsDir() {
+			return nil
+		}
+		alg := filepath.Base(filepath.Dir(path))
+		blobDigest, err := digest.Parse(fmt.Sprintf("%s:%s", alg, info.Name()))
+		if err != nil {
+			return err
+		}
+		if exists := s.graph.Exists(blobDigest); !exists {
+			// remove the blob from storage if it does not exist in Store
+			err = os.Remove(path)
 			if err != nil {
-				return err
-			}
-			if exists := s.graph.Exists(blobDigest); !exists {
-				// remove the blob from storage if it does not exist in Store
-				if err := s.storage.deleteByDigest(ctx, blobDigest); err != nil {
-					return err
+				if errors.Is(err, fs.ErrNotExist) {
+					return fmt.Errorf("%s: %w", blobDigest, errdef.ErrNotFound)
 				}
+				return err
 			}
 		}
 		return nil
diff --git a/content/oci/storage.go b/content/oci/storage.go
index a581cd1f..efb9f3d8 100644
--- a/content/oci/storage.go
+++ b/content/oci/storage.go
@@ -25,7 +25,6 @@ import (
 	"path/filepath"
 	"sync"
 
-	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/errdef"
 	"oras.land/oras-go/v2/internal/ioutil"
@@ -110,19 +109,15 @@ func (s *Storage) Push(_ context.Context, expected ocispec.Descriptor, content i
 
 // Delete removes the target from the system.
 func (s *Storage) Delete(ctx context.Context, target ocispec.Descriptor) error {
-	return s.deleteByDigest(ctx, target.Digest)
-}
-
-func (s *Storage) deleteByDigest(ctx context.Context, digest digest.Digest) error {
-	path, err := blobPath(digest)
+	path, err := blobPath(target.Digest)
 	if err != nil {
-		return fmt.Errorf("%s: %w", digest, errdef.ErrInvalidDigest)
+		return fmt.Errorf("%s: %s: %w", target.Digest, target.MediaType, errdef.ErrInvalidDigest)
 	}
 	targetPath := filepath.Join(s.root, path)
 	err = os.Remove(targetPath)
 	if err != nil {
 		if errors.Is(err, fs.ErrNotExist) {
-			return fmt.Errorf("%s: %w", digest, errdef.ErrNotFound)
+			return fmt.Errorf("%s: %s: %w", target.Digest, target.MediaType, errdef.ErrNotFound)
 		}
 		return err
 	}

From 5083aeac0c2ac0bc55d3dd297e62abc3350495a9 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Wed, 3 Jan 2024 13:28:11 +0800
Subject: [PATCH 04/25] resolved some comments

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go       | 2 +-
 internal/graph/memory.go | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 916a71e5..c3d63e27 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -467,7 +467,7 @@ func (s *Store) GC(ctx context.Context) error {
 	defer s.sync.Unlock()
 
 	// clean up dangling layers in Store
-	danglings := s.graph.GetDanglingLayers()
+	danglings := s.graph.GetUnreferencedRootNodes()
 	for _, desc := range danglings {
 		// do not remove existing manifests in the index
 		if _, err := s.tagResolver.Resolve(ctx, string(desc.Digest)); err == errdef.ErrNotFound {
diff --git a/internal/graph/memory.go b/internal/graph/memory.go
index c9655335..3609fc1d 100644
--- a/internal/graph/memory.go
+++ b/internal/graph/memory.go
@@ -183,8 +183,8 @@ func (m *Memory) Exists(dgst digest.Digest) bool {
 	return exists
 }
 
-// GetDanglingLayers returns the dangling (unreferenced) layer nodes in the memory.
-func (m *Memory) GetDanglingLayers() []ocispec.Descriptor {
+// GetUnreferencedRootNodes returns the dangling (unreferenced) root nodes in the memory.
+func (m *Memory) GetUnreferencedRootNodes() []ocispec.Descriptor {
 	var danglings []ocispec.Descriptor
 	for key, desc := range m.nodes {
 		if _, exist := m.predecessors[key]; !exist {
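
A toy illustration of what GetUnreferencedRootNodes computes, using plain maps in place of the internal set type; the digests and graph shape are made up for the example.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// Toy graph: the manifest references config and layer, so the only
	// nodes without predecessors are the manifest itself and the blob
	// that nothing points to.
	manifest := digest.FromString("manifest")
	config := digest.FromString("config")
	layer := digest.FromString("layer")
	dangling := digest.FromString("dangling blob")

	nodes := []digest.Digest{manifest, config, layer, dangling}
	predecessors := map[digest.Digest][]digest.Digest{
		config: {manifest},
		layer:  {manifest},
	}

	// The same check GetUnreferencedRootNodes performs: a node with no
	// entry in the predecessors map is a root; roots that are not listed
	// in index.json are candidates for garbage collection.
	for _, n := range nodes {
		if _, ok := predecessors[n]; !ok {
			fmt.Println("unreferenced root node:", n)
		}
	}
}
```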

From 7b4c0b754e24b9c4be416ed14aa6d689af49d7c3 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Wed, 3 Jan 2024 14:39:23 +0800
Subject: [PATCH 05/25] added test case for referrers

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci_test.go | 48 ++++++++++++++++++++++-------------------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 1dcc557b..2d3d5b74 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -2863,10 +2863,11 @@ func TestStore_GC(t *testing.T) {
 			Size:      int64(len(blob)),
 		})
 	}
-	generateManifest := func(config ocispec.Descriptor, layers ...ocispec.Descriptor) {
+	generateManifest := func(config ocispec.Descriptor, subject *ocispec.Descriptor, layers ...ocispec.Descriptor) {
 		manifest := ocispec.Manifest{
-			Config: config,
-			Layers: layers,
+			Config:  config,
+			Subject: subject,
+			Layers:  layers,
 		}
 		manifestJSON, err := json.Marshal(manifest)
 		if err != nil {
@@ -2897,21 +2898,24 @@ func TestStore_GC(t *testing.T) {
 	appendBlob(ocispec.MediaTypeImageConfig, []byte("config"))          // Blob 0
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("blob"))             // Blob 1
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer"))   // Blob 2, dangling layer
-	generateManifest(descs[0], descs[1])                                // Blob 3, valid manifest
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 2")) // Blob 4, dangling layer
-	generateArtifactManifest(descs[4])                                  // blob 5, dangling artifact
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 3")) // Blob 6, dangling layer
-	generateArtifactManifest(descs[6])                                  // blob 7, dangling artifact
-	generateImageIndex(descs[7], descs[5])                              // blob 8, dangling image index
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 1"))  // Blob 9, garbage layer 1
-	generateManifest(descs[0], descs[4])                                // Blob 10, garbage manifest 1
-	appendBlob(ocispec.MediaTypeImageConfig, []byte("garbage config"))  // Blob 11, garbage config
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 2"))  // Blob 12, garbage layer 2
-	generateManifest(descs[6], descs[7])                                // Blob 13, garbage manifest 2
-
-	// push blobs 0 - blobs 8 into s
+	generateManifest(descs[0], nil, descs[1])                           // Blob 3, valid manifest
+	generateManifest(descs[0], &descs[3], descs[1])                     // Blob 4, referrer of a valid manifest
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 2")) // Blob 5, dangling layer
+	generateArtifactManifest(descs[4])                                  // blob 6, dangling artifact
+	generateManifest(descs[0], &descs[5], descs[1])                     // Blob 7, referrer of a dangling manifest
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 3")) // Blob 8, dangling layer
+	generateArtifactManifest(descs[6])                                  // blob 9, dangling artifact
+	generateImageIndex(descs[7], descs[5])                              // blob 10, dangling image index
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 1"))  // Blob 11, garbage layer 1
+	generateManifest(descs[0], nil, descs[4])                           // Blob 12, garbage manifest 1
+	appendBlob(ocispec.MediaTypeImageConfig, []byte("garbage config"))  // Blob 13, garbage config
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("garbage layer 2"))  // Blob 14, garbage layer 2
+	generateManifest(descs[6], nil, descs[7])                           // Blob 15, garbage manifest 2
+	generateManifest(descs[0], &descs[13], descs[1])                    // Blob 16, referrer of a garbage manifest
+
+	// push blobs 0 - blobs 10 into s
 	eg, egCtx := errgroup.WithContext(ctx)
-	for i := 0; i <= 8; i++ {
+	for i := 0; i <= 10; i++ {
 		eg.Go(func(i int) func() error {
 			return func() error {
 				err := s.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
@@ -2926,15 +2930,15 @@ func TestStore_GC(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// remove blobs 4 - blobs 8 from index, making them a dangling tree
-	for i := 4; i <= 8; i++ {
+	// remove blobs 5 - blobs 10 from index, making them a dangling tree
+	for i := 5; i <= 10; i++ {
 		s.tagResolver.Untag(string(descs[i].Digest))
 	}
 	s.SaveIndex()
 
-	// push blobs 9 - blobs 13 into s.storage, making them garbage as their metadata
+	// push blobs 11 - blobs 16 into s.storage, making them garbage as their metadata
 	// doesn't exist in s
-	for i := 9; i < len(blobs); i++ {
+	for i := 11; i < len(blobs); i++ {
 		eg.Go(func(i int) func() error {
 			return func() error {
 				err := s.storage.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
@@ -2955,7 +2959,7 @@ func TestStore_GC(t *testing.T) {
 	}
 
 	// verify existence
-	wantExistence := []bool{true, true, false, true, false, false, false, false, false, false, false, false, false, false}
+	wantExistence := []bool{true, true, false, true, true, false, false, false, false, false, false, false, false, false, false, false, false}
 	for i, wantValue := range wantExistence {
 		exists, err := s.Exists(egCtx, descs[i])
 		if err != nil {

From a809ea2819ca78d3e3610bfa805590c8bcf4023d Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 10:08:56 +0800
Subject: [PATCH 06/25] replaced walk with loop

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go | 51 ++++++++++++++++++++++++++++++----------------
 1 file changed, 34 insertions(+), 17 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index c3d63e27..c3ba13ea 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -25,6 +25,7 @@ import (
 	"io"
 	"io/fs"
 	"os"
+	"path"
 	"path/filepath"
 	"sync"
 
@@ -480,31 +481,42 @@ func (s *Store) GC(ctx context.Context) error {
 
 	// clean up garbage blobs in the storage
 	rootpath := filepath.Join(s.root, "blobs")
-	return filepath.Walk(rootpath, func(path string, info fs.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		// skip the directories
-		if info.IsDir() {
-			return nil
+	algDirs, err := os.ReadDir(rootpath)
+	if err != nil {
+		return err
+	}
+	for _, algDir := range algDirs {
+		alg := algDir.Name()
+		// skip unsupported directories
+		if !isValidAlgorithm(alg) {
+			continue
 		}
-		alg := filepath.Base(filepath.Dir(path))
-		blobDigest, err := digest.Parse(fmt.Sprintf("%s:%s", alg, info.Name()))
+		algPath := path.Join(rootpath, alg)
+		dgstDirs, err := os.ReadDir(algPath)
 		if err != nil {
 			return err
 		}
-		if exists := s.graph.Exists(blobDigest); !exists {
-			// remove the blob from storage if it does not exist in Store
-			err = os.Remove(path)
+		for _, dgstDir := range dgstDirs {
+			dgst := dgstDir.Name()
+			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
+			err := blobDigest.Validate()
+			// skip unsupported directories
 			if err != nil {
-				if errors.Is(err, fs.ErrNotExist) {
-					return fmt.Errorf("%s: %w", blobDigest, errdef.ErrNotFound)
+				continue
+			}
+			if exists := s.graph.Exists(blobDigest); !exists {
+				// remove the blob from storage if it does not exist in Store
+				err = os.Remove(path.Join(algPath, dgst))
+				if err != nil {
+					if errors.Is(err, fs.ErrNotExist) {
+						return fmt.Errorf("%s: %w", blobDigest, errdef.ErrNotFound)
+					}
+					return err
 				}
-				return err
 			}
 		}
-		return nil
-	})
+	}
+	return nil
 }
 
 // unsafeStore is used to bypass lock restrictions in Delete.
@@ -529,3 +541,8 @@ func validateReference(ref string) error {
 	// TODO: may enforce more strict validation if needed.
 	return nil
 }
+
+// isValidAlgorithm checks if a string is a supported hash algorithm.
+func isValidAlgorithm(alg string) bool {
+	return alg == string(digest.SHA256) || alg == string(digest.SHA512) || alg == string(digest.SHA384)
+}
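
A self-contained sketch of the validation step the new loop relies on: digest.NewDigestFromEncoded plus Validate accepts a well-formed <alg>/<encoded> entry and rejects stray files such as .DS_Store, which the loop then skips instead of aborting.

```go
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func main() {
	// A well-formed entry: algorithm directory "sha256" plus a 64-character
	// hex file name reconstructs to a valid digest.
	encoded := digest.FromBytes([]byte("hello")).Encoded()
	good := digest.NewDigestFromEncoded(digest.SHA256, encoded)
	fmt.Println(good.Validate()) // <nil>

	// A stray file name such as ".DS_Store" fails validation, so the GC
	// loop skips the entry instead of returning an error.
	bad := digest.NewDigestFromEncoded(digest.SHA256, ".DS_Store")
	fmt.Println(bad.Validate() != nil) // true
}
```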

From 4130046fab4322d9f18356c7d236fe1aa8e6fdfa Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 12:28:58 +0800
Subject: [PATCH 07/25] used traverseIndex

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go       | 49 +++++++++++++++++++++++++++++++---------
 content/oci/oci_test.go  |  4 ++--
 internal/graph/memory.go | 17 --------------
 3 files changed, 40 insertions(+), 30 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index c3ba13ea..bfe47a41 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -460,6 +460,39 @@ func (s *Store) writeIndexFile() error {
 	return os.WriteFile(s.indexPath, indexJSON, 0666)
 }
 
+// draft
+func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], error) {
+	manifests := s.index.Manifests
+	visited := set.New[digest.Digest]()
+	result := set.New[digest.Digest]()
+	queue := []ocispec.Descriptor{}
+	queue = append(queue, manifests...)
+	for len(queue) > 0 {
+		head := queue[0]
+		queue = queue[1:]
+		if visited.Contains(head.Digest) {
+			continue
+		}
+		result.Add(head.Digest)
+		visited.Add(head.Digest)
+		// find successors
+		succ, err := content.Successors(ctx, &unsafeStore{s}, head)
+		if err != nil {
+			return nil, err
+		}
+		queue = append(queue, succ...)
+		// find referrers
+		if descriptor.IsManifest(head) {
+			refs, err := registry.Referrers(ctx, &unsafeStore{s}, head, "")
+			if err != nil {
+				return nil, err
+			}
+			queue = append(queue, refs...)
+		}
+	}
+	return result, nil
+}
+
 // GC removes garbage from Store. The garbage to be cleaned up includes:
 //   - unreferenced (dangling) blobs in Store which have no predecessors
 //   - garbage blobs in the storage whose metadata is not stored in Store
@@ -467,16 +500,10 @@ func (s *Store) GC(ctx context.Context) error {
 	s.sync.Lock()
 	defer s.sync.Unlock()
 
-	// clean up dangling layers in Store
-	danglings := s.graph.GetUnreferencedRootNodes()
-	for _, desc := range danglings {
-		// do not remove existing manifests in the index
-		if _, err := s.tagResolver.Resolve(ctx, string(desc.Digest)); err == errdef.ErrNotFound {
-			// remove the blob and its metadata from the Store
-			if err := s.delete(ctx, desc, true, true); err != nil {
-				return err
-			}
-		}
+	//traverse index
+	mySet, err := s.traverseIndex(ctx)
+	if err != nil {
+		return err
 	}
 
 	// clean up garbage blobs in the storage
@@ -504,7 +531,7 @@ func (s *Store) GC(ctx context.Context) error {
 			if err != nil {
 				continue
 			}
-			if exists := s.graph.Exists(blobDigest); !exists {
+			if exists := mySet.Contains(blobDigest); !exists {
 				// remove the blob from storage if it does not exist in Store
 				err = os.Remove(path.Join(algPath, dgst))
 				if err != nil {
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 2d3d5b74..6804a093 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -2930,8 +2930,8 @@ func TestStore_GC(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// remove blobs 5 - blobs 10 from index, making them a dangling tree
-	for i := 5; i <= 10; i++ {
+	// remove blobs 4 - blobs 10 from index, making them a dangling tree
+	for i := 4; i <= 10; i++ {
 		s.tagResolver.Untag(string(descs[i].Digest))
 	}
 	s.SaveIndex()
diff --git a/internal/graph/memory.go b/internal/graph/memory.go
index 3609fc1d..31a7dbec 100644
--- a/internal/graph/memory.go
+++ b/internal/graph/memory.go
@@ -176,20 +176,3 @@ func (m *Memory) index(ctx context.Context, fetcher content.Fetcher, node ocispe
 	}
 	return successors, nil
 }
-
-// Exists returns if a blob denoted by its digest exists in the memory.
-func (m *Memory) Exists(dgst digest.Digest) bool {
-	_, exists := m.nodes[dgst]
-	return exists
-}
-
-// GetUnreferencedRootNodes returns the dangling (unreferenced) root nodes in the memory.
-func (m *Memory) GetUnreferencedRootNodes() []ocispec.Descriptor {
-	var danglings []ocispec.Descriptor
-	for key, desc := range m.nodes {
-		if _, exist := m.predecessors[key]; !exist {
-			danglings = append(danglings, desc)
-		}
-	}
-	return danglings
-}
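
The traverseIndex added above is a breadth-first walk with a visited set; a stripped-down sketch of the same pattern (hypothetical node type, not the real descriptor graph) shows why shared successors in the DAG are expanded only once.

```go
package main

import "fmt"

// node is a stand-in for an OCI descriptor; only the digest and the links to
// successors matter for the traversal pattern.
type node struct {
	digest     string
	successors []*node
}

// reachable mirrors the breadth-first walk in traverseIndex: start from the
// root set, skip digests that have already been visited, and collect every
// digest that can be reached.
func reachable(roots []*node) map[string]bool {
	visited := make(map[string]bool)
	queue := append([]*node(nil), roots...)
	for len(queue) > 0 {
		head := queue[0]
		queue = queue[1:]
		if visited[head.digest] {
			continue // shared successors are expanded only once
		}
		visited[head.digest] = true
		queue = append(queue, head.successors...)
	}
	return visited
}

func main() {
	layer := &node{digest: "sha256:layer"}
	config := &node{digest: "sha256:config"}
	// Two manifests share the same layer, but the walk still visits it once.
	m1 := &node{digest: "sha256:manifest-1", successors: []*node{config, layer}}
	m2 := &node{digest: "sha256:manifest-2", successors: []*node{config, layer}}
	fmt.Println(len(reachable([]*node{m1, m2}))) // 4
}
```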

From b2dd91c597991331ea052a6738e67e9748893ba1 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 12:59:54 +0800
Subject: [PATCH 08/25] changed delete back

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index bfe47a41..b8031714 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -170,17 +170,13 @@ func (s *Store) Delete(ctx context.Context, target ocispec.Descriptor) error {
 	s.sync.Lock()
 	defer s.sync.Unlock()
 
-	return s.delete(ctx, target, s.AutoGC, s.AutoDeleteReferrers)
-}
-
-func (s *Store) delete(ctx context.Context, target ocispec.Descriptor, autoGC bool, autoDeleteReferrers bool) error {
 	deleteQueue := []ocispec.Descriptor{target}
 	for len(deleteQueue) > 0 {
 		head := deleteQueue[0]
 		deleteQueue = deleteQueue[1:]
 
 		// get referrers if applicable
-		if autoDeleteReferrers && descriptor.IsManifest(head) {
+		if s.AutoDeleteReferrers && descriptor.IsManifest(head) {
 			referrers, err := registry.Referrers(ctx, &unsafeStore{s}, head, "")
 			if err != nil {
 				return err
@@ -189,11 +185,11 @@ func (s *Store) delete(ctx context.Context, target ocispec.Descriptor, autoGC bo
 		}
 
 		// delete the head of queue
-		danglings, err := s.deleteNode(ctx, head)
+		danglings, err := s.delete(ctx, head)
 		if err != nil {
 			return err
 		}
-		if autoGC {
+		if s.AutoGC {
 			for _, d := range danglings {
 				// do not delete existing manifests in tagResolver
 				_, err = s.tagResolver.Resolve(ctx, string(d.Digest))
@@ -207,8 +203,8 @@ func (s *Store) delete(ctx context.Context, target ocispec.Descriptor, autoGC bo
 	return nil
 }
 
-// deleteNode deletes one node and returns the dangling nodes caused by the delete.
-func (s *Store) deleteNode(ctx context.Context, target ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+// delete deletes one node and returns the dangling nodes caused by the delete.
+func (s *Store) delete(ctx context.Context, target ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 	resolvers := s.tagResolver.Map()
 	untagged := false
 	for reference, desc := range resolvers {

From a6adc3378957464168bbc521e6fa22632341fe66 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 13:19:40 +0800
Subject: [PATCH 09/25] added context check

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      | 33 ++++++++++++++++++++++++++-------
 content/oci/oci_test.go | 15 +++++++++++++--
 2 files changed, 39 insertions(+), 9 deletions(-)
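
Note (editorial, not part of the patch): the isContextDone helper below does a non-blocking select on ctx.Done(), so GC can be aborted between iterations of the traversal. A minimal sketch of how a caller could rely on this, assuming an OCI layout at ./layout and a store that holds at least one manifest (names and path are illustrative only):

package main

import (
	"context"
	"fmt"

	"oras.land/oras-go/v2/content/oci"
)

func main() {
	store, err := oci.New("./layout")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // cancel before GC starts
	// traverseIndex checks ctx.Done() on every queue iteration, so GC is
	// expected to return ctx.Err() instead of scanning the whole store.
	if err := store.GC(ctx); err != nil {
		fmt.Println("GC aborted:", err)
	}
}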

diff --git a/content/oci/oci.go b/content/oci/oci.go
index b8031714..0a8fdbce 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -456,20 +456,24 @@ func (s *Store) writeIndexFile() error {
 	return os.WriteFile(s.indexPath, indexJSON, 0666)
 }
 
-// draft
+// traverseIndex starts from index.json and visits every node reachable by the
+// successor and referrer relations. It returns a set of digests of the visited nodes.
 func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], error) {
 	manifests := s.index.Manifests
 	visited := set.New[digest.Digest]()
-	result := set.New[digest.Digest]()
+	results := set.New[digest.Digest]()
 	queue := []ocispec.Descriptor{}
 	queue = append(queue, manifests...)
 	for len(queue) > 0 {
+		if err := isContextDone(ctx); err != nil {
+			return nil, err
+		}
 		head := queue[0]
 		queue = queue[1:]
 		if visited.Contains(head.Digest) {
 			continue
 		}
-		result.Add(head.Digest)
+		results.Add(head.Digest)
 		visited.Add(head.Digest)
 		// find successors
 		succ, err := content.Successors(ctx, &unsafeStore{s}, head)
@@ -486,7 +490,7 @@ func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], erro
 			queue = append(queue, refs...)
 		}
 	}
-	return result, nil
+	return results, nil
 }
 
 // GC removes garbage from Store. The garbage to be cleaned are:
@@ -496,8 +500,8 @@ func (s *Store) GC(ctx context.Context) error {
 	s.sync.Lock()
 	defer s.sync.Unlock()
 
-	//traverse index
-	mySet, err := s.traverseIndex(ctx)
+	//traverse index.json to find all reachable nodes
+	reachableNodes, err := s.traverseIndex(ctx)
 	if err != nil {
 		return err
 	}
@@ -520,6 +524,10 @@ func (s *Store) GC(ctx context.Context) error {
 			return err
 		}
 		for _, dgstDir := range dgstDirs {
+			if err := isContextDone(ctx); err != nil {
+				return err
+			}
+
 			dgst := dgstDir.Name()
 			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
 			err := blobDigest.Validate()
@@ -527,7 +535,7 @@ func (s *Store) GC(ctx context.Context) error {
 			if err != nil {
 				continue
 			}
-			if exists := mySet.Contains(blobDigest); !exists {
+			if !reachableNodes.Contains(blobDigest) {
 				// remove the blob from storage if it does not exist in Store
 				err = os.Remove(path.Join(algPath, dgst))
 				if err != nil {
@@ -555,6 +563,17 @@ func (s *unsafeStore) Predecessors(ctx context.Context, node ocispec.Descriptor)
 	return s.graph.Predecessors(ctx, node)
 }
 
+// isContextDone returns an error if the context is done.
+// Reference: https://pkg.go.dev/context#Context
+func isContextDone(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+		return nil
+	}
+}
+
 // validateReference validates ref.
 func validateReference(ref string) error {
 	if ref == "" {
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 6804a093..aacd19f0 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -2899,7 +2899,7 @@ func TestStore_GC(t *testing.T) {
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("blob"))             // Blob 1
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer"))   // Blob 2, dangling layer
 	generateManifest(descs[0], nil, descs[1])                           // Blob 3, valid manifest
-	generateManifest(descs[0], &descs[3], descs[1])                     // Blob 4, referrer of a valid manifest
+	generateManifest(descs[0], &descs[3], descs[1])                     // Blob 4, referrer of a valid manifest, not in index.json
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 2")) // Blob 5, dangling layer
 	generateArtifactManifest(descs[4])                                  // blob 6, dangling artifact
 	generateManifest(descs[0], &descs[5], descs[1])                     // Blob 7, referrer of a dangling manifest
@@ -2930,7 +2930,7 @@ func TestStore_GC(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// remove blobs 4 - blobs 10 from index, making them a dangling tree
+	// remove blobs 4 - blobs 10 from index.json
 	for i := 4; i <= 10; i++ {
 		s.tagResolver.Untag(string(descs[i].Digest))
 	}
@@ -2953,6 +2953,17 @@ func TestStore_GC(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	// confirm that all the blobs are in the storage
+	for i := 11; i < len(blobs); i++ {
+		exists, err := s.Exists(egCtx, descs[i])
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !exists {
+			t.Fatalf("descs[%d] should exist", i)
+		}
+	}
+
 	// perform GC
 	if err = s.GC(egCtx); err != nil {
 		t.Fatal(err)

From b85b5aada03aec6cb432eab504a4cdbe2a8f32d7 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 13:27:44 +0800
Subject: [PATCH 10/25] revert change in memory

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go            |   7 --
 internal/graph/memory.go      |  26 +++---
 internal/graph/memory_test.go | 167 ++++++++++++++++++----------------
 3 files changed, 103 insertions(+), 97 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 0a8fdbce..bcfdec20 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -465,9 +465,6 @@ func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], erro
 	queue := []ocispec.Descriptor{}
 	queue = append(queue, manifests...)
 	for len(queue) > 0 {
-		if err := isContextDone(ctx); err != nil {
-			return nil, err
-		}
 		head := queue[0]
 		queue = queue[1:]
 		if visited.Contains(head.Digest) {
@@ -524,10 +521,6 @@ func (s *Store) GC(ctx context.Context) error {
 			return err
 		}
 		for _, dgstDir := range dgstDirs {
-			if err := isContextDone(ctx); err != nil {
-				return err
-			}
-
 			dgst := dgstDir.Name()
 			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
 			err := blobDigest.Validate()
diff --git a/internal/graph/memory.go b/internal/graph/memory.go
index 31a7dbec..b93df83e 100644
--- a/internal/graph/memory.go
+++ b/internal/graph/memory.go
@@ -20,11 +20,11 @@ import (
 	"errors"
 	"sync"
 
-	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/content"
 	"oras.land/oras-go/v2/errdef"
 	"oras.land/oras-go/v2/internal/container/set"
+	"oras.land/oras-go/v2/internal/descriptor"
 	"oras.land/oras-go/v2/internal/status"
 	"oras.land/oras-go/v2/internal/syncutil"
 )
@@ -35,7 +35,7 @@ type Memory struct {
 	//  1. a node exists in Memory.nodes if and only if it exists in the memory
 	//  2. Memory.nodes saves the ocispec.Descriptor map keys, which are used by
 	//    the other fields.
-	nodes map[digest.Digest]ocispec.Descriptor
+	nodes map[descriptor.Descriptor]ocispec.Descriptor
 
 	// predecessors has the following properties and behaviors:
 	//  1. a node exists in Memory.predecessors if it has at least one predecessor
@@ -43,14 +43,14 @@ type Memory struct {
 	//    the memory.
 	//  2. a node does not exist in Memory.predecessors, if it doesn't have any predecessors
 	//    in the memory.
-	predecessors map[digest.Digest]set.Set[digest.Digest]
+	predecessors map[descriptor.Descriptor]set.Set[descriptor.Descriptor]
 
 	// successors has the following properties and behaviors:
 	//  1. a node exists in Memory.successors if and only if it exists in the memory.
 	//  2. a node's entry in Memory.successors is always consistent with the actual
 	//    content of the node, regardless of whether or not each successor exists
 	//    in the memory.
-	successors map[digest.Digest]set.Set[digest.Digest]
+	successors map[descriptor.Descriptor]set.Set[descriptor.Descriptor]
 
 	lock sync.RWMutex
 }
@@ -58,9 +58,9 @@ type Memory struct {
 // NewMemory creates a new memory PredecessorFinder.
 func NewMemory() *Memory {
 	return &Memory{
-		nodes:        make(map[digest.Digest]ocispec.Descriptor),
-		predecessors: make(map[digest.Digest]set.Set[digest.Digest]),
-		successors:   make(map[digest.Digest]set.Set[digest.Digest]),
+		nodes:        make(map[descriptor.Descriptor]ocispec.Descriptor),
+		predecessors: make(map[descriptor.Descriptor]set.Set[descriptor.Descriptor]),
+		successors:   make(map[descriptor.Descriptor]set.Set[descriptor.Descriptor]),
 	}
 }
 
@@ -107,7 +107,7 @@ func (m *Memory) Predecessors(_ context.Context, node ocispec.Descriptor) ([]oci
 	m.lock.RLock()
 	defer m.lock.RUnlock()
 
-	key := node.Digest
+	key := descriptor.FromOCI(node)
 	set, exists := m.predecessors[key]
 	if !exists {
 		return nil, nil
@@ -125,7 +125,7 @@ func (m *Memory) Remove(node ocispec.Descriptor) []ocispec.Descriptor {
 	m.lock.Lock()
 	defer m.lock.Unlock()
 
-	nodeKey := node.Digest
+	nodeKey := descriptor.FromOCI(node)
 	var danglings []ocispec.Descriptor
 	// remove the node from its successors' predecessor list
 	for successorKey := range m.successors[nodeKey] {
@@ -157,19 +157,19 @@ func (m *Memory) index(ctx context.Context, fetcher content.Fetcher, node ocispe
 	defer m.lock.Unlock()
 
 	// index the node
-	nodeKey := node.Digest
+	nodeKey := descriptor.FromOCI(node)
 	m.nodes[nodeKey] = node
 
 	// for each successor, put it into the node's successors list, and
 	// put node into the succeesor's predecessors list
-	successorSet := set.New[digest.Digest]()
+	successorSet := set.New[descriptor.Descriptor]()
 	m.successors[nodeKey] = successorSet
 	for _, successor := range successors {
-		successorKey := successor.Digest
+		successorKey := descriptor.FromOCI(successor)
 		successorSet.Add(successorKey)
 		predecessorSet, exists := m.predecessors[successorKey]
 		if !exists {
-			predecessorSet = set.New[digest.Digest]()
+			predecessorSet = set.New[descriptor.Descriptor]()
 			m.predecessors[successorKey] = predecessorSet
 		}
 		predecessorSet.Add(nodeKey)
diff --git a/internal/graph/memory_test.go b/internal/graph/memory_test.go
index de5eed0c..9b5bab33 100644
--- a/internal/graph/memory_test.go
+++ b/internal/graph/memory_test.go
@@ -26,6 +26,7 @@ import (
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/internal/cas"
+	"oras.land/oras-go/v2/internal/descriptor"
 )
 
 // +------------------------------+
@@ -103,14 +104,19 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 		t.Errorf("testFetcher.Fetch() = %v, want %v", got, blobs[4])
 	}
 
+	nodeKeyA := descriptor.FromOCI(descA)
+	nodeKeyB := descriptor.FromOCI(descB)
+	nodeKeyC := descriptor.FromOCI(descC)
+	nodeKeyD := descriptor.FromOCI(descD)
+
 	// index and check the information of node D
 	testMemory.Index(ctx, testFetcher, descD)
 	// 1. verify its existence in testMemory.nodes
-	if _, exists := testMemory.nodes[descD.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyD]; !exists {
 		t.Errorf("nodes entry of %s should exist", "D")
 	}
 	// 2. verify that the entry of D exists in testMemory.successors and it's empty
-	successorsD, exists := testMemory.successors[descD.Digest]
+	successorsD, exists := testMemory.successors[nodeKeyD]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "D")
 	}
@@ -121,7 +127,7 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 		t.Errorf("successors of %s should be empty", "D")
 	}
 	// 3. there should be no entry of D in testMemory.predecessors yet
-	_, exists = testMemory.predecessors[descD.Digest]
+	_, exists = testMemory.predecessors[nodeKeyD]
 	if exists {
 		t.Errorf("predecessor entry of %s should not exist yet", "D")
 	}
@@ -129,11 +135,11 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 	// index and check the information of node C
 	testMemory.Index(ctx, testFetcher, descC)
 	// 1. verify its existence in memory.nodes
-	if _, exists := testMemory.nodes[descC.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyC]; !exists {
 		t.Errorf("nodes entry of %s should exist", "C")
 	}
 	// 2. verify that the entry of C exists in testMemory.successors and it's empty
-	successorsC, exists := testMemory.successors[descC.Digest]
+	successorsC, exists := testMemory.successors[nodeKeyC]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "C")
 	}
@@ -144,7 +150,7 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 		t.Errorf("successors of %s should be empty", "C")
 	}
 	// 3. there should be no entry of C in testMemory.predecessors yet
-	_, exists = testMemory.predecessors[descC.Digest]
+	_, exists = testMemory.predecessors[nodeKeyC]
 	if exists {
 		t.Errorf("predecessor entry of %s should not exist yet", "C")
 	}
@@ -152,48 +158,48 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 	// index and check the information of node A
 	testMemory.Index(ctx, testFetcher, descA)
 	// 1. verify its existence in testMemory.nodes
-	if _, exists := testMemory.nodes[descA.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyA]; !exists {
 		t.Errorf("nodes entry of %s should exist", "A")
 	}
 	// 2. verify that the entry of A exists in testMemory.successors and it contains
 	// node B and node D
-	successorsA, exists := testMemory.successors[descA.Digest]
+	successorsA, exists := testMemory.successors[nodeKeyA]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "A")
 	}
 	if successorsA == nil {
 		t.Errorf("successors of %s should be a set, not nil", "A")
 	}
-	if !successorsA.Contains(descB.Digest) {
+	if !successorsA.Contains(nodeKeyB) {
 		t.Errorf("successors of %s should contain %s", "A", "B")
 	}
-	if !successorsA.Contains(descD.Digest) {
+	if !successorsA.Contains(nodeKeyD) {
 		t.Errorf("successors of %s should contain %s", "A", "D")
 	}
 	// 3. verify that node A exists in the predecessors lists of its successors.
 	// there should be an entry of D in testMemory.predecessors by now and it
 	// should contain A but not B
-	predecessorsD, exists := testMemory.predecessors[descD.Digest]
+	predecessorsD, exists := testMemory.predecessors[nodeKeyD]
 	if !exists {
 		t.Errorf("predecessor entry of %s should exist by now", "D")
 	}
-	if !predecessorsD.Contains(descA.Digest) {
+	if !predecessorsD.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should contain %s", "D", "A")
 	}
-	if predecessorsD.Contains(descB.Digest) {
+	if predecessorsD.Contains(nodeKeyB) {
 		t.Errorf("predecessors of %s should not contain %s yet", "D", "B")
 	}
 	// there should be an entry of B in testMemory.predecessors now
 	// and it should contain A
-	predecessorsB, exists := testMemory.predecessors[descB.Digest]
+	predecessorsB, exists := testMemory.predecessors[nodeKeyB]
 	if !exists {
 		t.Errorf("predecessor entry of %s should exist by now", "B")
 	}
-	if !predecessorsB.Contains(descA.Digest) {
+	if !predecessorsB.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should contain %s", "B", "A")
 	}
 	// 4. there should be no entry of A in testMemory.predecessors
-	_, exists = testMemory.predecessors[descA.Digest]
+	_, exists = testMemory.predecessors[nodeKeyA]
 	if exists {
 		t.Errorf("predecessor entry of %s should not exist", "A")
 	}
@@ -201,100 +207,100 @@ func TestMemory_IndexAndRemove(t *testing.T) {
 	// index and check the information of node B
 	testMemory.Index(ctx, testFetcher, descB)
 	// 1. verify its existence in testMemory.nodes
-	if _, exists := testMemory.nodes[descB.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyB]; !exists {
 		t.Errorf("nodes entry of %s should exist", "B")
 	}
 	// 2. verify that the entry of B exists in testMemory.successors and it contains
 	// node C and node D
-	successorsB, exists := testMemory.successors[descB.Digest]
+	successorsB, exists := testMemory.successors[nodeKeyB]
 	if !exists {
 		t.Errorf("successor entry of %s should exist", "B")
 	}
 	if successorsB == nil {
 		t.Errorf("successors of %s should be a set, not nil", "B")
 	}
-	if !successorsB.Contains(descC.Digest) {
+	if !successorsB.Contains(nodeKeyC) {
 		t.Errorf("successors of %s should contain %s", "B", "C")
 	}
-	if !successorsB.Contains(descD.Digest) {
+	if !successorsB.Contains(nodeKeyD) {
 		t.Errorf("successors of %s should contain %s", "B", "D")
 	}
 	// 3. verify that node B exists in the predecessors lists of its successors.
 	// there should be an entry of C in testMemory.predecessors by now
 	// and it should contain B
-	predecessorsC, exists := testMemory.predecessors[descC.Digest]
+	predecessorsC, exists := testMemory.predecessors[nodeKeyC]
 	if !exists {
 		t.Errorf("predecessor entry of %s should exist by now", "C")
 	}
-	if !predecessorsC.Contains(descB.Digest) {
+	if !predecessorsC.Contains(nodeKeyB) {
 		t.Errorf("predecessors of %s should contain %s", "C", "B")
 	}
 	// predecessors of D should have been updated now to have node A and B
-	if !predecessorsD.Contains(descB.Digest) {
+	if !predecessorsD.Contains(nodeKeyB) {
 		t.Errorf("predecessors of %s should contain %s", "D", "B")
 	}
-	if !predecessorsD.Contains(descA.Digest) {
+	if !predecessorsD.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should contain %s", "D", "A")
 	}
 
 	// remove node B and check the stored information
 	testMemory.Remove(descB)
 	// 1. verify that node B no longer exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descB.Digest]; exists {
+	if _, exists := testMemory.nodes[nodeKeyB]; exists {
 		t.Errorf("nodes entry of %s should no longer exist", "B")
 	}
 	// 2. verify B' predecessors info: B's entry in testMemory.predecessors should
 	// still exist, since its predecessor A still exists
-	predecessorsB, exists = testMemory.predecessors[descB.Digest]
+	predecessorsB, exists = testMemory.predecessors[nodeKeyB]
 	if !exists {
 		t.Errorf("testDeletableMemory.predecessors should still contain the entry of %s", "B")
 	}
-	if !predecessorsB.Contains(descA.Digest) {
+	if !predecessorsB.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should still contain %s", "B", "A")
 	}
 	// 3. verify B' successors info: B's entry in testMemory.successors should no
 	// longer exist
-	if _, exists := testMemory.successors[descB.Digest]; exists {
+	if _, exists := testMemory.successors[nodeKeyB]; exists {
 		t.Errorf("testDeletableMemory.successors should not contain the entry of %s", "B")
 	}
 	// 4. verify B' predecessors' successors info: B should still exist in A's
 	// successors
-	if !successorsA.Contains(descB.Digest) {
+	if !successorsA.Contains(nodeKeyB) {
 		t.Errorf("successors of %s should still contain %s", "A", "B")
 	}
 	// 5. verify B' successors' predecessors info: C's entry in testMemory.predecessors
 	// should no longer exist, since C's only predecessor B is already deleted
-	if _, exists = testMemory.predecessors[descC.Digest]; exists {
+	if _, exists = testMemory.predecessors[nodeKeyC]; exists {
 		t.Errorf("predecessor entry of %s should no longer exist by now, since all its predecessors have been deleted", "C")
 	}
 	// B should no longer exist in D's predecessors
-	if predecessorsD.Contains(descB.Digest) {
+	if predecessorsD.Contains(nodeKeyB) {
 		t.Errorf("predecessors of %s should not contain %s", "D", "B")
 	}
 	// but A still exists in D's predecessors
-	if !predecessorsD.Contains(descA.Digest) {
+	if !predecessorsD.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should still contain %s", "D", "A")
 	}
 
 	// remove node A and check the stored information
 	testMemory.Remove(descA)
 	// 1. verify that node A no longer exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descA.Digest]; exists {
+	if _, exists := testMemory.nodes[nodeKeyA]; exists {
 		t.Errorf("nodes entry of %s should no longer exist", "A")
 	}
 	// 2. verify A' successors info: A's entry in testMemory.successors should no
 	// longer exist
-	if _, exists := testMemory.successors[descA.Digest]; exists {
+	if _, exists := testMemory.successors[nodeKeyA]; exists {
 		t.Errorf("testDeletableMemory.successors should not contain the entry of %s", "A")
 	}
 	// 3. verify A' successors' predecessors info: D's entry in testMemory.predecessors
 	// should no longer exist, since all predecessors of D are already deleted
-	if _, exists = testMemory.predecessors[descD.Digest]; exists {
+	if _, exists = testMemory.predecessors[nodeKeyD]; exists {
 		t.Errorf("predecessor entry of %s should no longer exist by now, since all its predecessors have been deleted", "D")
 	}
 	// B's entry in testMemory.predecessors should no longer exist, since B's only
 	// predecessor A is already deleted
-	if _, exists = testMemory.predecessors[descB.Digest]; exists {
+	if _, exists = testMemory.predecessors[nodeKeyB]; exists {
 		t.Errorf("predecessor entry of %s should no longer exist by now, since all its predecessors have been deleted", "B")
 	}
 }
@@ -386,99 +392,106 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 		t.Errorf("testFetcher.Fetch() = %v, want %v", got, blobs[4])
 	}
 
+	nodeKeyA := descriptor.FromOCI(descA)
+	nodeKeyB := descriptor.FromOCI(descB)
+	nodeKeyC := descriptor.FromOCI(descC)
+	nodeKeyD := descriptor.FromOCI(descD)
+	nodeKeyE := descriptor.FromOCI(descE)
+	nodeKeyF := descriptor.FromOCI(descF)
+
 	// index node A into testMemory using IndexAll
 	testMemory.IndexAll(ctx, testFetcher, descA)
 
 	// check the information of node A
 	// 1. verify that node A exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descA.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyA]; !exists {
 		t.Errorf("nodes entry of %s should exist", "A")
 	}
 	// 2. verify that there is no entry of A in predecessors
-	if _, exists := testMemory.predecessors[descA.Digest]; exists {
+	if _, exists := testMemory.predecessors[nodeKeyA]; exists {
 		t.Errorf("there should be no entry of %s in predecessors", "A")
 	}
 	// 3. verify that A has successors B, C, D
-	successorsA, exists := testMemory.successors[descA.Digest]
+	successorsA, exists := testMemory.successors[nodeKeyA]
 	if !exists {
 		t.Errorf("there should be an entry of %s in successors", "A")
 	}
-	if !successorsA.Contains(descB.Digest) {
+	if !successorsA.Contains(nodeKeyB) {
 		t.Errorf("successors of %s should contain %s", "A", "B")
 	}
-	if !successorsA.Contains(descC.Digest) {
+	if !successorsA.Contains(nodeKeyC) {
 		t.Errorf("successors of %s should contain %s", "A", "C")
 	}
-	if !successorsA.Contains(descD.Digest) {
+	if !successorsA.Contains(nodeKeyD) {
 		t.Errorf("successors of %s should contain %s", "A", "D")
 	}
 
 	// check the information of node B
 	// 1. verify that node B exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descB.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyB]; !exists {
 		t.Errorf("nodes entry of %s should exist", "B")
 	}
 	// 2. verify that B has node A in its predecessors
-	predecessorsB := testMemory.predecessors[descB.Digest]
-	if !predecessorsB.Contains(descA.Digest) {
+	predecessorsB := testMemory.predecessors[nodeKeyB]
+	if !predecessorsB.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should contain %s", "B", "A")
 	}
 	// 3. verify that B has node E in its successors
-	successorsB := testMemory.successors[descB.Digest]
-	if !successorsB.Contains(descE.Digest) {
+	successorsB := testMemory.successors[nodeKeyB]
+	if !successorsB.Contains(nodeKeyE) {
 		t.Errorf("successors of %s should contain %s", "B", "E")
 	}
 
 	// check the information of node C
 	// 1. verify that node C exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descC.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyC]; !exists {
 		t.Errorf("nodes entry of %s should exist", "C")
 	}
 	// 2. verify that C has node A in its predecessors
-	predecessorsC := testMemory.predecessors[descC.Digest]
-	if !predecessorsC.Contains(descA.Digest) {
+	predecessorsC := testMemory.predecessors[nodeKeyC]
+	if !predecessorsC.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should contain %s", "C", "A")
 	}
 	// 3. verify that C has node E and F in its successors
-	successorsC := testMemory.successors[descC.Digest]
-	if !successorsC.Contains(descE.Digest) {
+	successorsC := testMemory.successors[nodeKeyC]
+	if !successorsC.Contains(nodeKeyE) {
 		t.Errorf("successors of %s should contain %s", "C", "E")
 	}
-	if !successorsC.Contains(descF.Digest) {
+	if !successorsC.Contains(nodeKeyF) {
 		t.Errorf("successors of %s should contain %s", "C", "F")
 	}
 
 	// check the information of node D
 	// 1. verify that node D exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descD.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyD]; !exists {
 		t.Errorf("nodes entry of %s should exist", "D")
 	}
 	// 2. verify that D has node A in its predecessors
-	predecessorsD := testMemory.predecessors[descD.Digest]
-	if !predecessorsD.Contains(descA.Digest) {
+	predecessorsD := testMemory.predecessors[nodeKeyD]
+	if !predecessorsD.Contains(nodeKeyA) {
 		t.Errorf("predecessors of %s should contain %s", "D", "A")
 	}
 	// 3. verify that D has node F in its successors
-	successorsD := testMemory.successors[descD.Digest]
-	if !successorsD.Contains(descF.Digest) {
+	successorsD := testMemory.successors[nodeKeyD]
+	if !successorsD.Contains(nodeKeyF) {
 		t.Errorf("successors of %s should contain %s", "D", "F")
 	}
 
 	// check the information of node E
 	// 1. verify that node E exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descE.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyE]; !exists {
 		t.Errorf("nodes entry of %s should exist", "E")
 	}
 	// 2. verify that E has node B and C in its predecessors
-	predecessorsE := testMemory.predecessors[descE.Digest]
-	if !predecessorsE.Contains(descB.Digest) {
+	predecessorsE := testMemory.predecessors[nodeKeyE]
+	if !predecessorsE.Contains(nodeKeyB) {
 		t.Errorf("predecessors of %s should contain %s", "E", "B")
 	}
-	if !predecessorsE.Contains(descC.Digest) {
+	if !predecessorsE.Contains(nodeKeyC) {
 		t.Errorf("predecessors of %s should contain %s", "E", "C")
 	}
 	// 3. verify that E has an entry in successors and it's empty
-	successorsE, exists := testMemory.successors[descE.Digest]
+	successorsE, exists := testMemory.successors[nodeKeyE]
 	if !exists {
 		t.Errorf("entry %s should exist in testMemory.successors", "E")
 	}
@@ -491,19 +504,19 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 
 	// check the information of node F
 	// 1. verify that node F exists in testMemory.nodes
-	if _, exists := testMemory.nodes[descF.Digest]; !exists {
+	if _, exists := testMemory.nodes[nodeKeyF]; !exists {
 		t.Errorf("nodes entry of %s should exist", "F")
 	}
 	// 2. verify that F has node C and D in its predecessors
-	predecessorsF := testMemory.predecessors[descF.Digest]
-	if !predecessorsF.Contains(descC.Digest) {
+	predecessorsF := testMemory.predecessors[nodeKeyF]
+	if !predecessorsF.Contains(nodeKeyC) {
 		t.Errorf("predecessors of %s should contain %s", "F", "C")
 	}
-	if !predecessorsF.Contains(descD.Digest) {
+	if !predecessorsF.Contains(nodeKeyD) {
 		t.Errorf("predecessors of %s should contain %s", "F", "D")
 	}
 	// 3. verify that F has an entry in successors and it's empty
-	successorsF, exists := testMemory.successors[descF.Digest]
+	successorsF, exists := testMemory.successors[nodeKeyF]
 	if !exists {
 		t.Errorf("entry %s should exist in testMemory.successors", "F")
 	}
@@ -544,34 +557,34 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 
 	// remove node C and check the stored information
 	testMemory.Remove(descC)
-	if predecessorsE.Contains(descC.Digest) {
+	if predecessorsE.Contains(nodeKeyC) {
 		t.Errorf("predecessors of %s should not contain %s", "E", "C")
 	}
-	if predecessorsF.Contains(descC.Digest) {
+	if predecessorsF.Contains(nodeKeyC) {
 		t.Errorf("predecessors of %s should not contain %s", "F", "C")
 	}
-	if !successorsA.Contains(descC.Digest) {
+	if !successorsA.Contains(nodeKeyC) {
 		t.Errorf("successors of %s should still contain %s", "A", "C")
 	}
-	if _, exists := testMemory.successors[descC.Digest]; exists {
+	if _, exists := testMemory.successors[nodeKeyC]; exists {
 		t.Errorf("testMemory.successors should not contain the entry of %s", "C")
 	}
-	if _, exists := testMemory.predecessors[descC.Digest]; !exists {
+	if _, exists := testMemory.predecessors[nodeKeyC]; !exists {
 		t.Errorf("entry %s in predecessors should still exists since it still has at least one predecessor node present", "C")
 	}
 
 	// remove node A and check the stored information
 	testMemory.Remove(descA)
-	if _, exists := testMemory.predecessors[descB.Digest]; exists {
+	if _, exists := testMemory.predecessors[nodeKeyB]; exists {
 		t.Errorf("entry %s in predecessors should no longer exists", "B")
 	}
-	if _, exists := testMemory.predecessors[descC.Digest]; exists {
+	if _, exists := testMemory.predecessors[nodeKeyC]; exists {
 		t.Errorf("entry %s in predecessors should no longer exists", "C")
 	}
-	if _, exists := testMemory.predecessors[descD.Digest]; exists {
+	if _, exists := testMemory.predecessors[nodeKeyD]; exists {
 		t.Errorf("entry %s in predecessors should no longer exists", "D")
 	}
-	if _, exists := testMemory.successors[descA.Digest]; exists {
+	if _, exists := testMemory.successors[nodeKeyA]; exists {
 		t.Errorf("testDeletableMemory.successors should not contain the entry of %s", "A")
 	}
 

From ca35c14b4da9946df41f091e522fb6e7af19a711 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 13:45:38 +0800
Subject: [PATCH 11/25] fixed context issue

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      |  6 ++++++
 content/oci/oci_test.go | 39 +++++++++++----------------------------
 2 files changed, 17 insertions(+), 28 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index bcfdec20..e2b3d86c 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -465,6 +465,9 @@ func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], erro
 	queue := []ocispec.Descriptor{}
 	queue = append(queue, manifests...)
 	for len(queue) > 0 {
+		if err := isContextDone(ctx); err != nil {
+			return nil, err
+		}
 		head := queue[0]
 		queue = queue[1:]
 		if visited.Contains(head.Digest) {
@@ -521,6 +524,9 @@ func (s *Store) GC(ctx context.Context) error {
 			return err
 		}
 		for _, dgstDir := range dgstDirs {
+			if err := isContextDone(ctx); err != nil {
+				return err
+			}
 			dgst := dgstDir.Name()
 			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
 			err := blobDigest.Validate()
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index aacd19f0..c4af37a2 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -2914,20 +2914,11 @@ func TestStore_GC(t *testing.T) {
 	generateManifest(descs[0], &descs[13], descs[1])                    // Blob 16, referrer of a garbage manifest
 
 	// push blobs 0 - blobs 10 into s
-	eg, egCtx := errgroup.WithContext(ctx)
 	for i := 0; i <= 10; i++ {
-		eg.Go(func(i int) func() error {
-			return func() error {
-				err := s.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
-				if err != nil {
-					return fmt.Errorf("failed to push test content to src: %d: %v", i, err)
-				}
-				return nil
-			}
-		}(i))
-	}
-	if err := eg.Wait(); err != nil {
-		t.Fatal(err)
+		err := s.Push(ctx, descs[i], bytes.NewReader(blobs[i]))
+		if err != nil {
+			t.Errorf("failed to push test content to src: %d: %v", i, err)
+		}
 	}
 
 	// remove blobs 4 - blobs 10 from index.json
@@ -2939,23 +2930,15 @@ func TestStore_GC(t *testing.T) {
 	// push blobs 11 - blobs 16 into s.storage, making them garbage as their metadata
 	// doesn't exist in s
 	for i := 11; i < len(blobs); i++ {
-		eg.Go(func(i int) func() error {
-			return func() error {
-				err := s.storage.Push(egCtx, descs[i], bytes.NewReader(blobs[i]))
-				if err != nil {
-					return fmt.Errorf("failed to push test content to src: %d: %v", i, err)
-				}
-				return nil
-			}
-		}(i))
-	}
-	if err := eg.Wait(); err != nil {
-		t.Fatal(err)
+		err := s.storage.Push(ctx, descs[i], bytes.NewReader(blobs[i]))
+		if err != nil {
+			t.Errorf("failed to push test content to src: %d: %v", i, err)
+		}
 	}
 
 	// confirm that all the blobs are in the storage
 	for i := 11; i < len(blobs); i++ {
-		exists, err := s.Exists(egCtx, descs[i])
+		exists, err := s.Exists(ctx, descs[i])
 		if err != nil {
 			t.Fatal(err)
 		}
@@ -2965,14 +2948,14 @@ func TestStore_GC(t *testing.T) {
 	}
 
 	// perform GC
-	if err = s.GC(egCtx); err != nil {
+	if err = s.GC(ctx); err != nil {
 		t.Fatal(err)
 	}
 
 	// verify existence
 	wantExistence := []bool{true, true, false, true, true, false, false, false, false, false, false, false, false, false, false, false, false}
 	for i, wantValue := range wantExistence {
-		exists, err := s.Exists(egCtx, descs[i])
+		exists, err := s.Exists(ctx, descs[i])
 		if err != nil {
 			t.Fatal(err)
 		}

From e18dd2f9f8aab6162c7fb9d23bbeedac64ed15bf Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 15:37:47 +0800
Subject: [PATCH 12/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci_test.go | 44 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index c4af37a2..2495760f 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"path"
 	"path/filepath"
 	"reflect"
 	"strconv"
@@ -2965,6 +2966,49 @@ func TestStore_GC(t *testing.T) {
 	}
 }
 
+func TestStore_GCErrorPath(t *testing.T) {
+	tempDir := t.TempDir()
+	s, err := New(tempDir)
+	if err != nil {
+		t.Fatal("New() error =", err)
+	}
+	ctx := context.Background()
+
+	// generate test content
+	var blobs [][]byte
+	var descs []ocispec.Descriptor
+	appendBlob := func(mediaType string, blob []byte) {
+		blobs = append(blobs, blob)
+		descs = append(descs, ocispec.Descriptor{
+			MediaType: mediaType,
+			Digest:    digest.FromBytes(blob),
+			Size:      int64(len(blob)),
+		})
+	}
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob")) // Blob 0
+
+	// push the valid blob
+	err = s.Push(ctx, descs[0], bytes.NewReader(blobs[0]))
+	if err != nil {
+		t.Error("failed to push test content to src")
+	}
+
+	// write random contents
+	algPath := path.Join(tempDir, "blobs")
+	dgstPath := path.Join(algPath, "sha256")
+	if err := os.WriteFile(path.Join(algPath, "other"), []byte("random"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+	if err := os.WriteFile(path.Join(dgstPath, "other2"), []byte("random2"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+
+	// perform GC
+	if err = s.GC(ctx); err != nil {
+		t.Fatal(err)
+	}
+}
+
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {
 	if len(actual) != len(expected) {
 		return false

From b174f14c3a2318a4873dc64ddeea7a1e1450327d Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 16:22:34 +0800
Subject: [PATCH 13/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      |  6 +-----
 content/oci/oci_test.go | 21 +++++++++++++++++++++
 2 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index e2b3d86c..5358b080 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -23,7 +23,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"io/fs"
 	"os"
 	"path"
 	"path/filepath"
@@ -500,7 +499,7 @@ func (s *Store) GC(ctx context.Context) error {
 	s.sync.Lock()
 	defer s.sync.Unlock()
 
-	//traverse index.json to find all reachable nodes
+	// traverse index.json to find all reachable nodes
 	reachableNodes, err := s.traverseIndex(ctx)
 	if err != nil {
 		return err
@@ -538,9 +537,6 @@ func (s *Store) GC(ctx context.Context) error {
 				// remove the blob from storage if it does not exist in Store
 				err = os.Remove(path.Join(algPath, dgst))
 				if err != nil {
-					if errors.Is(err, fs.ErrNotExist) {
-						return fmt.Errorf("%s: %w", blobDigest, errdef.ErrNotFound)
-					}
 					return err
 				}
 			}
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 2495760f..6ceb0930 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3007,6 +3007,27 @@ func TestStore_GCErrorPath(t *testing.T) {
 	if err = s.GC(ctx); err != nil {
 		t.Fatal(err)
 	}
+
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
+
+	// push the valid blob
+	err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
+	if err != nil {
+		t.Error("failed to push test content to src")
+	}
+
+	// test os.ReadDir() errors
+	s.root = "random dir"
+	if err = s.GC(ctx); err == nil {
+		t.Fatal("expect an error when os.ReadDir()")
+	}
+	s.root = tempDir
+	if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+	if err = s.GC(ctx); err == nil {
+		t.Fatal("expect an error when os.ReadDir()")
+	}
 }
 
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {

From 60624cbe8e3c7197813aa62433bd008f4e58aa7c Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 16:56:59 +0800
Subject: [PATCH 14/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci_test.go | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 6ceb0930..8ce2ba57 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3028,6 +3028,20 @@ func TestStore_GCErrorPath(t *testing.T) {
 	if err = s.GC(ctx); err == nil {
 		t.Fatal("expect an error when os.ReadDir()")
 	}
+	os.Remove(path.Join(algPath, "sha384"))
+
+	// test os.Remove() error
+	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
+	badPath := path.Join(algPath, "sha256", badDigest)
+	if err := os.Mkdir(badPath, 0444); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+	if err = s.GC(ctx); err == nil {
+		t.Fatal("expect an error when os.Remove()")
+	}
 }
 
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {

From bb899fa3539cdd3a1f96c22fe74508c09aff6629 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 17:01:40 +0800
Subject: [PATCH 15/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci_test.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 8ce2ba57..665ab88b 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3033,7 +3033,7 @@ func TestStore_GCErrorPath(t *testing.T) {
 	// test os.Remove() error
 	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
 	badPath := path.Join(algPath, "sha256", badDigest)
-	if err := os.Mkdir(badPath, 0444); err != nil {
+	if err := os.Mkdir(badPath, 0777); err != nil {
 		t.Fatal(err)
 	}
 	if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {

From f896e13e60b4cc331acada20a822a0c524f3e6be Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 4 Jan 2024 17:42:00 +0800
Subject: [PATCH 16/25] remove extra set

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      | 4 +---
 content/oci/oci_test.go | 4 +++-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 5358b080..200d0bbf 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -459,7 +459,6 @@ func (s *Store) writeIndexFile() error {
 // successor and referrer relations. It returns a set of digests of the visited nodes.
 func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], error) {
 	manifests := s.index.Manifests
-	visited := set.New[digest.Digest]()
 	results := set.New[digest.Digest]()
 	queue := []ocispec.Descriptor{}
 	queue = append(queue, manifests...)
@@ -469,11 +468,10 @@ func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], erro
 		}
 		head := queue[0]
 		queue = queue[1:]
-		if visited.Contains(head.Digest) {
+		if results.Contains(head.Digest) {
 			continue
 		}
 		results.Add(head.Digest)
-		visited.Add(head.Digest)
 		// find successors
 		succ, err := content.Successors(ctx, &unsafeStore{s}, head)
 		if err != nil {
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 665ab88b..1da20891 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3028,7 +3028,9 @@ func TestStore_GCErrorPath(t *testing.T) {
 	if err = s.GC(ctx); err == nil {
 		t.Fatal("expect an error when os.ReadDir()")
 	}
-	os.Remove(path.Join(algPath, "sha384"))
+	if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
+		t.Fatal(err)
+	}
 
 	// test os.Remove() error
 	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()

From eb087c092cdacc134a37658091d9c71904435933 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Mon, 8 Jan 2024 16:41:00 +0800
Subject: [PATCH 17/25] resolved comments

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      | 15 ++++++++++-----
 content/oci/oci_test.go |  4 ++--
 2 files changed, 12 insertions(+), 7 deletions(-)
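
Note (editorial, not part of the patch): isKnownAlgorithm below switches on digest.Algorithm rather than comparing raw strings, keeping the comparison typed. A standalone sketch of the same check, for illustration only (the real function is unexported inside the oci package):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

// isKnownAlgorithm mirrors the unexported helper added in this patch.
func isKnownAlgorithm(alg string) bool {
	switch digest.Algorithm(alg) {
	case digest.SHA256, digest.SHA512, digest.SHA384:
		return true
	default:
		return false
	}
}

func main() {
	// GC only descends into blob directories named after a known algorithm.
	for _, dir := range []string{"sha256", "sha384", "sha512", "md5", "random"} {
		fmt.Printf("%s: %v\n", dir, isKnownAlgorithm(dir))
	}
}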

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 200d0bbf..89fa0027 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -512,13 +512,13 @@ func (s *Store) GC(ctx context.Context) error {
 	for _, algDir := range algDirs {
 		alg := algDir.Name()
 		// skip unsupported directories
-		if !isValidAlgorithm(alg) {
+		if !isKnownAlgorithm(alg) {
 			continue
 		}
 		algPath := path.Join(rootpath, alg)
 		dgstDirs, err := os.ReadDir(algPath)
 		if err != nil {
-			return err
+			continue
 		}
 		for _, dgstDir := range dgstDirs {
 			if err := isContextDone(ctx); err != nil {
@@ -577,7 +577,12 @@ func validateReference(ref string) error {
 	return nil
 }
 
-// isValidAlgorithm checks is a string is a supported hash algorithm
-func isValidAlgorithm(alg string) bool {
-	return alg == string(digest.SHA256) || alg == string(digest.SHA512) || alg == string(digest.SHA384)
+// isKnownAlgorithm checks if a string is a supported hash algorithm
+func isKnownAlgorithm(alg string) bool {
+	switch digest.Algorithm(alg) {
+	case digest.SHA256, digest.SHA512, digest.SHA384:
+		return true
+	default:
+		return false
+	}
 }
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 1da20891..f356c3af 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3025,8 +3025,8 @@ func TestStore_GCErrorPath(t *testing.T) {
 	if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
 		t.Fatal("error calling WriteFile(), error =", err)
 	}
-	if err = s.GC(ctx); err == nil {
-		t.Fatal("expect an error when os.ReadDir()")
+	if err = s.GC(ctx); err != nil {
+		t.Fatal("this error should be silently ignored")
 	}
 	if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
 		t.Fatal(err)

From 2b62edbac84e2f7e528861c2ba7ef725d3eb277f Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 16:52:22 +0800
Subject: [PATCH 18/25] reload index

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go       |  53 +++++--------
 content/oci/oci_test.go  | 163 +++++++++++++++++++--------------------
 internal/graph/memory.go |  10 +++
 3 files changed, 109 insertions(+), 117 deletions(-)
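
Note (editorial, not part of the patch): with this change GC rebuilds the in-memory graph from index.json via reloadIndex and treats graph.DigestSet() as the set of reachable digests; any blob on disk whose digest is not in that set is garbage. A hedged sketch of that membership check with simplified, hypothetical names (it imports the internal container/set package, so it only compiles inside the oras-go module):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	"oras.land/oras-go/v2/internal/container/set"
)

// garbage is a hypothetical helper: it returns the on-disk digests that are
// not in the reachable set, i.e. the blobs GC would remove from storage.
func garbage(reachable set.Set[digest.Digest], onDisk []digest.Digest) []digest.Digest {
	var victims []digest.Digest
	for _, d := range onDisk {
		if !reachable.Contains(d) {
			victims = append(victims, d)
		}
	}
	return victims
}

func main() {
	reachable := set.New[digest.Digest]()
	kept := digest.FromString("kept blob")
	orphan := digest.FromString("orphaned blob")
	reachable.Add(kept)
	// only the orphaned digest is reported as garbage
	fmt.Println(garbage(reachable, []digest.Digest{kept, orphan}))
}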

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 89fa0027..034a43e8 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -455,39 +455,17 @@ func (s *Store) writeIndexFile() error {
 	return os.WriteFile(s.indexPath, indexJSON, 0666)
 }
 
-// traverseIndex starts from index.json and visits every node reachable by the
-// successor and referrer relations. It returns a set of digests of the visited nodes.
-func (s *Store) traverseIndex(ctx context.Context) (set.Set[digest.Digest], error) {
-	manifests := s.index.Manifests
-	results := set.New[digest.Digest]()
-	queue := []ocispec.Descriptor{}
-	queue = append(queue, manifests...)
-	for len(queue) > 0 {
-		if err := isContextDone(ctx); err != nil {
-			return nil, err
-		}
-		head := queue[0]
-		queue = queue[1:]
-		if results.Contains(head.Digest) {
-			continue
-		}
-		results.Add(head.Digest)
-		// find successors
-		succ, err := content.Successors(ctx, &unsafeStore{s}, head)
-		if err != nil {
-			return nil, err
-		}
-		queue = append(queue, succ...)
-		// find referrers
-		if descriptor.IsManifest(head) {
-			refs, err := registry.Referrers(ctx, &unsafeStore{s}, head, "")
-			if err != nil {
-				return nil, err
-			}
-			queue = append(queue, refs...)
-		}
+// reloadIndex reloads the index and updates metadata by creating a new store.
+func (s *Store) reloadIndex(ctx context.Context) error {
+	newStore, err := NewWithContext(ctx, s.root)
+	if err != nil {
+		return err
 	}
-	return results, nil
+	s.index = newStore.index
+	s.storage = newStore.storage
+	s.tagResolver = newStore.tagResolver
+	s.graph = newStore.graph
+	return nil
 }
 
 // GC removes garbage from Store. The garbage to be cleaned are:
@@ -497,11 +475,12 @@ func (s *Store) GC(ctx context.Context) error {
 	s.sync.Lock()
 	defer s.sync.Unlock()
 
-	// traverse index.json to find all reachable nodes
-	reachableNodes, err := s.traverseIndex(ctx)
+	// get reachable nodes by reloading the index
+	err := s.reloadIndex(ctx)
 	if err != nil {
-		return err
+		return fmt.Errorf("unable to reload index: %w", err)
 	}
+	reachableNodes := s.graph.DigestSet()
 
 	// clean up garbage blobs in the storage
 	rootpath := filepath.Join(s.root, "blobs")
@@ -510,6 +489,10 @@ func (s *Store) GC(ctx context.Context) error {
 		return err
 	}
 	for _, algDir := range algDirs {
+		// if !algDir.IsDir() {
+		// 	fmt.Println("got here")
+		// 	continue
+		// }
 		alg := algDir.Name()
 		// skip unsupported directories
 		if !isKnownAlgorithm(alg) {
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index f356c3af..5b774e91 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -24,7 +24,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path"
 	"path/filepath"
 	"reflect"
 	"strconv"
@@ -2900,7 +2899,7 @@ func TestStore_GC(t *testing.T) {
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("blob"))             // Blob 1
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer"))   // Blob 2, dangling layer
 	generateManifest(descs[0], nil, descs[1])                           // Blob 3, valid manifest
-	generateManifest(descs[0], &descs[3], descs[1])                     // Blob 4, referrer of a valid manifest, not in index.json
+generateManifest(descs[0], &descs[3], descs[1])                     // Blob 4, referrer of a valid manifest, not in index.json, should be cleaned by the current implementation
 	appendBlob(ocispec.MediaTypeImageLayer, []byte("dangling layer 2")) // Blob 5, dangling layer
 	generateArtifactManifest(descs[4])                                  // blob 6, dangling artifact
 	generateManifest(descs[0], &descs[5], descs[1])                     // Blob 7, referrer of a dangling manifest
@@ -2954,7 +2953,7 @@ func TestStore_GC(t *testing.T) {
 	}
 
 	// verify existence
-	wantExistence := []bool{true, true, false, true, true, false, false, false, false, false, false, false, false, false, false, false, false}
+	wantExistence := []bool{true, true, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false}
 	for i, wantValue := range wantExistence {
 		exists, err := s.Exists(ctx, descs[i])
 		if err != nil {
@@ -2966,85 +2965,85 @@ func TestStore_GC(t *testing.T) {
 	}
 }
 
-func TestStore_GCErrorPath(t *testing.T) {
-	tempDir := t.TempDir()
-	s, err := New(tempDir)
-	if err != nil {
-		t.Fatal("New() error =", err)
-	}
-	ctx := context.Background()
-
-	// generate test content
-	var blobs [][]byte
-	var descs []ocispec.Descriptor
-	appendBlob := func(mediaType string, blob []byte) {
-		blobs = append(blobs, blob)
-		descs = append(descs, ocispec.Descriptor{
-			MediaType: mediaType,
-			Digest:    digest.FromBytes(blob),
-			Size:      int64(len(blob)),
-		})
-	}
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob")) // Blob 0
-
-	// push the valid blob
-	err = s.Push(ctx, descs[0], bytes.NewReader(blobs[0]))
-	if err != nil {
-		t.Error("failed to push test content to src")
-	}
-
-	// write random contents
-	algPath := path.Join(tempDir, "blobs")
-	dgstPath := path.Join(algPath, "sha256")
-	if err := os.WriteFile(path.Join(algPath, "other"), []byte("random"), 0444); err != nil {
-		t.Fatal("error calling WriteFile(), error =", err)
-	}
-	if err := os.WriteFile(path.Join(dgstPath, "other2"), []byte("random2"), 0444); err != nil {
-		t.Fatal("error calling WriteFile(), error =", err)
-	}
-
-	// perform GC
-	if err = s.GC(ctx); err != nil {
-		t.Fatal(err)
-	}
-
-	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
-
-	// push the valid blob
-	err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
-	if err != nil {
-		t.Error("failed to push test content to src")
-	}
-
-	// test os.ReadDir() errors
-	s.root = "random dir"
-	if err = s.GC(ctx); err == nil {
-		t.Fatal("expect an error when os.ReadDir()")
-	}
-	s.root = tempDir
-	if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
-		t.Fatal("error calling WriteFile(), error =", err)
-	}
-	if err = s.GC(ctx); err != nil {
-		t.Fatal("this error should be silently ignored")
-	}
-	if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
-		t.Fatal(err)
-	}
-
-	// test os.Remove() error
-	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
-	badPath := path.Join(algPath, "sha256", badDigest)
-	if err := os.Mkdir(badPath, 0777); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
-		t.Fatal("error calling WriteFile(), error =", err)
-	}
-	if err = s.GC(ctx); err == nil {
-		t.Fatal("expect an error when os.Remove()")
-	}
-}
+// func TestStore_GCErrorPath(t *testing.T) {
+// 	tempDir := t.TempDir()
+// 	s, err := New(tempDir)
+// 	if err != nil {
+// 		t.Fatal("New() error =", err)
+// 	}
+// 	ctx := context.Background()
+
+// 	// generate test content
+// 	var blobs [][]byte
+// 	var descs []ocispec.Descriptor
+// 	appendBlob := func(mediaType string, blob []byte) {
+// 		blobs = append(blobs, blob)
+// 		descs = append(descs, ocispec.Descriptor{
+// 			MediaType: mediaType,
+// 			Digest:    digest.FromBytes(blob),
+// 			Size:      int64(len(blob)),
+// 		})
+// 	}
+// 	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob")) // Blob 0
+
+// 	// push the valid blob
+// 	err = s.Push(ctx, descs[0], bytes.NewReader(blobs[0]))
+// 	if err != nil {
+// 		t.Error("failed to push test content to src")
+// 	}
+
+// 	// write random contents
+// 	algPath := path.Join(tempDir, "blobs")
+// 	dgstPath := path.Join(algPath, "sha256")
+// 	if err := os.WriteFile(path.Join(algPath, "other"), []byte("random"), 0444); err != nil {
+// 		t.Fatal("error calling WriteFile(), error =", err)
+// 	}
+// 	if err := os.WriteFile(path.Join(dgstPath, "other2"), []byte("random2"), 0444); err != nil {
+// 		t.Fatal("error calling WriteFile(), error =", err)
+// 	}
+
+// 	// perform GC
+// 	if err = s.GC(ctx); err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
+
+// 	// push the valid blob
+// 	err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
+// 	if err != nil {
+// 		t.Error("failed to push test content to src")
+// 	}
+
+// 	// test os.ReadDir() errors
+// 	s.root = "random dir"
+// 	if err = s.GC(ctx); err == nil {
+// 		t.Fatal("expect an error when os.ReadDir()")
+// 	}
+// 	s.root = tempDir
+// 	if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
+// 		t.Fatal("error calling WriteFile(), error =", err)
+// 	}
+// 	if err = s.GC(ctx); err != nil {
+// 		t.Fatal("this error should be silently ignored")
+// 	}
+// 	if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
+// 		t.Fatal(err)
+// 	}
+
+// 	// test os.Remove() error
+// 	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
+// 	badPath := path.Join(algPath, "sha256", badDigest)
+// 	if err := os.Mkdir(badPath, 0777); err != nil {
+// 		t.Fatal(err)
+// 	}
+// 	if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
+// 		t.Fatal("error calling WriteFile(), error =", err)
+// 	}
+// 	if err = s.GC(ctx); err == nil {
+// 		t.Fatal("expect an error when os.Remove()")
+// 	}
+// }
 
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {
 	if len(actual) != len(expected) {
diff --git a/internal/graph/memory.go b/internal/graph/memory.go
index b93df83e..aa735552 100644
--- a/internal/graph/memory.go
+++ b/internal/graph/memory.go
@@ -20,6 +20,7 @@ import (
 	"errors"
 	"sync"
 
+	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"oras.land/oras-go/v2/content"
 	"oras.land/oras-go/v2/errdef"
@@ -147,6 +148,15 @@ func (m *Memory) Remove(node ocispec.Descriptor) []ocispec.Descriptor {
 	return danglings
 }
 
+// DigestSet returns the set of digests of all nodes in the memory.
+func (m *Memory) DigestSet() set.Set[digest.Digest] {
+	s := set.New[digest.Digest]()
+	for desc := range m.nodes {
+		s.Add(desc.Digest)
+	}
+	return s
+}
+
 // index indexes predecessors for each direct successor of the given node.
 func (m *Memory) index(ctx context.Context, fetcher content.Fetcher, node ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 	successors, err := content.Successors(ctx, fetcher, node)

From 1e15e737ae4f6d8a2e25da4154e2ac91d51dc5eb Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 17:06:04 +0800
Subject: [PATCH 19/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 internal/graph/memory_test.go | 78 +++++++++++++++++++++++++++++++++++
 1 file changed, 78 insertions(+)

diff --git a/internal/graph/memory_test.go b/internal/graph/memory_test.go
index 9b5bab33..89ef4446 100644
--- a/internal/graph/memory_test.go
+++ b/internal/graph/memory_test.go
@@ -610,3 +610,81 @@ func TestMemory_IndexAllAndPredecessors(t *testing.T) {
 		t.Errorf("incorrect predecessor result")
 	}
 }
+
+func TestMemory_DigestSet(t *testing.T) {
+	testFetcher := cas.NewMemory()
+	testMemory := NewMemory()
+	ctx := context.Background()
+
+	// generate test content
+	var blobs [][]byte
+	var descriptors []ocispec.Descriptor
+	appendBlob := func(mediaType string, blob []byte) ocispec.Descriptor {
+		blobs = append(blobs, blob)
+		descriptors = append(descriptors, ocispec.Descriptor{
+			MediaType: mediaType,
+			Digest:    digest.FromBytes(blob),
+			Size:      int64(len(blob)),
+		})
+		return descriptors[len(descriptors)-1]
+	}
+	generateManifest := func(layers ...ocispec.Descriptor) ocispec.Descriptor {
+		manifest := ocispec.Manifest{
+			Config: ocispec.Descriptor{MediaType: "test config"},
+			Layers: layers,
+		}
+		manifestJSON, err := json.Marshal(manifest)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return appendBlob(ocispec.MediaTypeImageManifest, manifestJSON)
+	}
+	generateIndex := func(manifests ...ocispec.Descriptor) ocispec.Descriptor {
+		index := ocispec.Index{
+			Manifests: manifests,
+		}
+		indexJSON, err := json.Marshal(index)
+		if err != nil {
+			t.Fatal(err)
+		}
+		return appendBlob(ocispec.MediaTypeImageIndex, indexJSON)
+	}
+	descE := appendBlob("layer node E", []byte("Node E is a layer")) // blobs[0], layer "E"
+	descF := appendBlob("layer node F", []byte("Node F is a layer")) // blobs[1], layer "F"
+	descB := generateManifest(descriptors[0:1]...)                   // blobs[2], manifest "B"
+	descC := generateManifest(descriptors[0:2]...)                   // blobs[3], manifest "C"
+	descD := generateManifest(descriptors[1:2]...)                   // blobs[4], manifest "D"
+	descA := generateIndex(descriptors[2:5]...)                      // blobs[5], index "A"
+
+	// prepare the content in the fetcher, so that it can be indexed by IndexAll
+	testContents := []ocispec.Descriptor{descE, descF, descB, descC, descD, descA}
+	for i := 0; i < len(blobs); i++ {
+		testFetcher.Push(ctx, testContents[i], bytes.NewReader(blobs[i]))
+	}
+
+	// make sure that testFetcher works
+	rc, err := testFetcher.Fetch(ctx, descA)
+	if err != nil {
+		t.Fatalf("testFetcher.Fetch() error = %v", err)
+	}
+	got, err := io.ReadAll(rc)
+	if err != nil {
+		t.Errorf("testFetcher.Fetch().Read() error = %v", err)
+	}
+	err = rc.Close()
+	if err != nil {
+		t.Errorf("testFetcher.Fetch().Close() error = %v", err)
+	}
+	if !bytes.Equal(got, blobs[5]) {
+		t.Errorf("testFetcher.Fetch() = %v, want %v", got, blobs[5])
+	}
+
+	// index node A into testMemory using IndexAll
+	testMemory.IndexAll(ctx, testFetcher, descA)
+	digestSet := testMemory.DigestSet()
+	for i := 0; i < len(blobs); i++ {
+		if !digestSet.Contains(descriptors[i].Digest) {
+			t.Errorf("digest of blob[%d] should exist in digestSet", i)
+		}
+	}
+}

From 70afd4037cf4b1767fc0334baac7151a66db92bf Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 17:15:21 +0800
Subject: [PATCH 20/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      |   9 +--
 content/oci/oci_test.go | 159 ++++++++++++++++++++--------------------
 2 files changed, 84 insertions(+), 84 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 034a43e8..8a2f7904 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -489,10 +489,9 @@ func (s *Store) GC(ctx context.Context) error {
 		return err
 	}
 	for _, algDir := range algDirs {
-		// if !algDir.IsDir() {
-		// 	fmt.Println("got here")
-		// 	continue
-		// }
+		if !algDir.IsDir() {
+			continue
+		}
 		alg := algDir.Name()
 		// skip unsupported directories
 		if !isKnownAlgorithm(alg) {
@@ -501,7 +500,7 @@ func (s *Store) GC(ctx context.Context) error {
 		algPath := path.Join(rootpath, alg)
 		dgstDirs, err := os.ReadDir(algPath)
 		if err != nil {
-			continue
+			return err
 		}
 		for _, dgstDir := range dgstDirs {
 			if err := isContextDone(ctx); err != nil {
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 5b774e91..ac0e10a2 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"path"
 	"path/filepath"
 	"reflect"
 	"strconv"
@@ -2965,85 +2966,85 @@ func TestStore_GC(t *testing.T) {
 	}
 }
 
-// func TestStore_GCErrorPath(t *testing.T) {
-// 	tempDir := t.TempDir()
-// 	s, err := New(tempDir)
-// 	if err != nil {
-// 		t.Fatal("New() error =", err)
-// 	}
-// 	ctx := context.Background()
-
-// 	// generate test content
-// 	var blobs [][]byte
-// 	var descs []ocispec.Descriptor
-// 	appendBlob := func(mediaType string, blob []byte) {
-// 		blobs = append(blobs, blob)
-// 		descs = append(descs, ocispec.Descriptor{
-// 			MediaType: mediaType,
-// 			Digest:    digest.FromBytes(blob),
-// 			Size:      int64(len(blob)),
-// 		})
-// 	}
-// 	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob")) // Blob 0
-
-// 	// push the valid blob
-// 	err = s.Push(ctx, descs[0], bytes.NewReader(blobs[0]))
-// 	if err != nil {
-// 		t.Error("failed to push test content to src")
-// 	}
-
-// 	// write random contents
-// 	algPath := path.Join(tempDir, "blobs")
-// 	dgstPath := path.Join(algPath, "sha256")
-// 	if err := os.WriteFile(path.Join(algPath, "other"), []byte("random"), 0444); err != nil {
-// 		t.Fatal("error calling WriteFile(), error =", err)
-// 	}
-// 	if err := os.WriteFile(path.Join(dgstPath, "other2"), []byte("random2"), 0444); err != nil {
-// 		t.Fatal("error calling WriteFile(), error =", err)
-// 	}
-
-// 	// perform GC
-// 	if err = s.GC(ctx); err != nil {
-// 		t.Fatal(err)
-// 	}
-
-// 	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
-
-// 	// push the valid blob
-// 	err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
-// 	if err != nil {
-// 		t.Error("failed to push test content to src")
-// 	}
-
-// 	// test os.ReadDir() errors
-// 	s.root = "random dir"
-// 	if err = s.GC(ctx); err == nil {
-// 		t.Fatal("expect an error when os.ReadDir()")
-// 	}
-// 	s.root = tempDir
-// 	if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
-// 		t.Fatal("error calling WriteFile(), error =", err)
-// 	}
-// 	if err = s.GC(ctx); err != nil {
-// 		t.Fatal("this error should be silently ignored")
-// 	}
-// 	if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
-// 		t.Fatal(err)
-// 	}
-
-// 	// test os.Remove() error
-// 	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
-// 	badPath := path.Join(algPath, "sha256", badDigest)
-// 	if err := os.Mkdir(badPath, 0777); err != nil {
-// 		t.Fatal(err)
-// 	}
-// 	if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
-// 		t.Fatal("error calling WriteFile(), error =", err)
-// 	}
-// 	if err = s.GC(ctx); err == nil {
-// 		t.Fatal("expect an error when os.Remove()")
-// 	}
-// }
+func TestStore_GCErrorPath(t *testing.T) {
+	tempDir := t.TempDir()
+	s, err := New(tempDir)
+	if err != nil {
+		t.Fatal("New() error =", err)
+	}
+	ctx := context.Background()
+
+	// generate test content
+	var blobs [][]byte
+	var descs []ocispec.Descriptor
+	appendBlob := func(mediaType string, blob []byte) {
+		blobs = append(blobs, blob)
+		descs = append(descs, ocispec.Descriptor{
+			MediaType: mediaType,
+			Digest:    digest.FromBytes(blob),
+			Size:      int64(len(blob)),
+		})
+	}
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob")) // Blob 0
+
+	// push the valid blob
+	err = s.Push(ctx, descs[0], bytes.NewReader(blobs[0]))
+	if err != nil {
+		t.Error("failed to push test content to src")
+	}
+
+	// write random contents
+	algPath := path.Join(tempDir, "blobs")
+	dgstPath := path.Join(algPath, "sha256")
+	if err := os.WriteFile(path.Join(algPath, "other"), []byte("random"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+	if err := os.WriteFile(path.Join(dgstPath, "other2"), []byte("random2"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+
+	// perform GC
+	if err = s.GC(ctx); err != nil {
+		t.Fatal(err)
+	}
+
+	// appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
+
+	// // push the valid blob
+	// err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
+	// if err != nil {
+	// 	t.Error("failed to push test content to src")
+	// }
+
+	// // test os.ReadDir() errors
+	// s.root = "random dir"
+	// if err = s.GC(ctx); err == nil {
+	// 	t.Fatal("expect an error when os.ReadDir()")
+	// }
+	// s.root = tempDir
+	// if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
+	// 	t.Fatal("error calling WriteFile(), error =", err)
+	// }
+	// if err = s.GC(ctx); err != nil {
+	// 	t.Fatal("this error should be silently ignored")
+	// }
+	// if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
+	// 	t.Fatal(err)
+	// }
+
+	// // test os.Remove() error
+	// badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
+	// badPath := path.Join(algPath, "sha256", badDigest)
+	// if err := os.Mkdir(badPath, 0777); err != nil {
+	// 	t.Fatal(err)
+	// }
+	// if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
+	// 	t.Fatal("error calling WriteFile(), error =", err)
+	// }
+	// if err = s.GC(ctx); err == nil {
+	// 	t.Fatal("expect an error when os.Remove()")
+	// }
+}
 
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {
 	if len(actual) != len(expected) {

From fc450e1d03649fb78a5b184f8ed848d087a80f48 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 17:29:26 +0800
Subject: [PATCH 21/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci_test.go | 38 +++++++++++++++++++-------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index ac0e10a2..225d04d3 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3008,15 +3008,15 @@ func TestStore_GCErrorPath(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
+	appendBlob(ocispec.MediaTypeImageLayer, []byte("valid blob 2")) // Blob 1
 
-	// // push the valid blob
-	// err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
-	// if err != nil {
-	// 	t.Error("failed to push test content to src")
-	// }
+	// push the valid blob
+	err = s.Push(ctx, descs[1], bytes.NewReader(blobs[1]))
+	if err != nil {
+		t.Error("failed to push test content to src")
+	}
 
-	// // test os.ReadDir() errors
+	// test os.ReadDir() errors
 	// s.root = "random dir"
 	// if err = s.GC(ctx); err == nil {
 	// 	t.Fatal("expect an error when os.ReadDir()")
@@ -3032,18 +3032,18 @@ func TestStore_GCErrorPath(t *testing.T) {
 	// 	t.Fatal(err)
 	// }
 
-	// // test os.Remove() error
-	// badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
-	// badPath := path.Join(algPath, "sha256", badDigest)
-	// if err := os.Mkdir(badPath, 0777); err != nil {
-	// 	t.Fatal(err)
-	// }
-	// if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
-	// 	t.Fatal("error calling WriteFile(), error =", err)
-	// }
-	// if err = s.GC(ctx); err == nil {
-	// 	t.Fatal("expect an error when os.Remove()")
-	// }
+	// test os.Remove() error
+	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
+	badPath := path.Join(algPath, "sha256", badDigest)
+	if err := os.Mkdir(badPath, 0777); err != nil {
+		t.Fatal(err)
+	}
+	if err := os.WriteFile(path.Join(badPath, "whatever"), []byte("extra content"), 0444); err != nil {
+		t.Fatal("error calling WriteFile(), error =", err)
+	}
+	if err = s.GC(ctx); err == nil {
+		t.Fatal("expect an error when os.Remove()")
+	}
 }
 
 func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descriptor) bool {

From a429bb4d8f998563ca5711bcd8c240472ffc6e27 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 17:57:13 +0800
Subject: [PATCH 22/25] increase coverage

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci_test.go | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 225d04d3..78d36050 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3016,21 +3016,13 @@ func TestStore_GCErrorPath(t *testing.T) {
 		t.Error("failed to push test content to src")
 	}
 
-	// test os.ReadDir() errors
-	// s.root = "random dir"
-	// if err = s.GC(ctx); err == nil {
-	// 	t.Fatal("expect an error when os.ReadDir()")
-	// }
-	// s.root = tempDir
-	// if err := os.WriteFile(path.Join(algPath, "sha384"), []byte("not a dir"), 0444); err != nil {
-	// 	t.Fatal("error calling WriteFile(), error =", err)
-	// }
-	// if err = s.GC(ctx); err != nil {
-	// 	t.Fatal("this error should be silently ignored")
-	// }
-	// if err := os.Remove(path.Join(algPath, "sha384")); err != nil {
-	// 	t.Fatal(err)
-	// }
+	// test os.ReadDir() error
+	if err := os.Mkdir(path.Join(algPath, "sha666"), 0777); err != nil {
+		t.Fatal(err)
+	}
+	if err = s.GC(ctx); err != nil {
+		t.Fatal("this error should be silently ignored")
+	}
 
 	// test os.Remove() error
 	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()

From a6eadac478022a877bfff4e4f4808c7d51efe755 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 18:07:08 +0800
Subject: [PATCH 23/25] minor fix

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      | 2 +-
 content/oci/oci_test.go | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 8a2f7904..8acd3dc9 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -509,7 +509,7 @@ func (s *Store) GC(ctx context.Context) error {
 			dgst := dgstDir.Name()
 			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
 			err := blobDigest.Validate()
-			// skip unsupported directories
+			// skip irrelevant content
 			if err != nil {
 				continue
 			}
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index 78d36050..f5b9fe3f 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3016,7 +3016,7 @@ func TestStore_GCErrorPath(t *testing.T) {
 		t.Error("failed to push test content to src")
 	}
 
-	// test os.ReadDir() error
+	// unknown algorithm
 	if err := os.Mkdir(path.Join(algPath, "sha666"), 0777); err != nil {
 		t.Fatal(err)
 	}
@@ -3024,7 +3024,7 @@ func TestStore_GCErrorPath(t *testing.T) {
 		t.Fatal("this error should be silently ignored")
 	}
 
-	// test os.Remove() error
+	// os.Remove() error
 	badDigest := digest.FromBytes([]byte("bad digest")).Encoded()
 	badPath := path.Join(algPath, "sha256", badDigest)
 	if err := os.Mkdir(badPath, 0777); err != nil {

From de79a2e2bf07bfb0a2ad7cd7c1c725511a292ffb Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Tue, 9 Jan 2024 19:32:11 +0800
Subject: [PATCH 24/25] resolved comments

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go      | 10 ++++++----
 content/oci/oci_test.go | 12 ++++++++++++
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index 8acd3dc9..c0b19c4a 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -468,7 +468,9 @@ func (s *Store) reloadIndex(ctx context.Context) error {
 	return nil
 }
 
-// GC removes garbage from Store. The garbage to be cleaned are:
+// GC removes garbage from Store. Any unsaved index will be lost. To prevent unexpected
+// loss, call SaveIndex() before GC or set AutoSaveIndex to true.
+// The garbage to be cleaned is:
 //   - unreferenced (dangling) blobs in Store which have no predecessors
 //   - garbage blobs in the storage whose metadata is not stored in Store
 func (s *Store) GC(ctx context.Context) error {
@@ -498,15 +500,15 @@ func (s *Store) GC(ctx context.Context) error {
 			continue
 		}
 		algPath := path.Join(rootpath, alg)
-		dgstDirs, err := os.ReadDir(algPath)
+		digestEntries, err := os.ReadDir(algPath)
 		if err != nil {
 			return err
 		}
-		for _, dgstDir := range dgstDirs {
+		for _, digestEntry := range digestEntries {
 			if err := isContextDone(ctx); err != nil {
 				return err
 			}
-			dgst := dgstDir.Name()
+			dgst := digestEntry.Name()
 			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
 			err := blobDigest.Validate()
 			// skip irrelevant content
diff --git a/content/oci/oci_test.go b/content/oci/oci_test.go
index f5b9fe3f..42702a6d 100644
--- a/content/oci/oci_test.go
+++ b/content/oci/oci_test.go
@@ -3057,3 +3057,15 @@ func equalDescriptorSet(actual []ocispec.Descriptor, expected []ocispec.Descript
 	}
 	return true
 }
+
+func Test_isContextDone(t *testing.T) {
+	ctx := context.Background()
+	ctx, cancel := context.WithCancel(ctx)
+	if err := isContextDone(ctx); err != nil {
+		t.Errorf("expect error = %v, got %v", nil, err)
+	}
+	cancel()
+	if err := isContextDone(ctx); err != context.Canceled {
+		t.Errorf("expect error = %v, got %v", context.Canceled, err)
+	}
+}

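Since the reworded doc comment above asks callers to persist the index before collecting garbage, here is a minimal end-to-end sketch of that call order from a consumer of the public API. The layout path is arbitrary, and SaveIndex is the existing no-argument Store method the comment refers to; treat this as an illustration of the intended usage, not part of the patch.

	package main

	import (
		"context"
		"log"

		"oras.land/oras-go/v2/content/oci"
	)

	func main() {
		ctx := context.Background()

		// open (or create) an OCI image layout store; the path is arbitrary
		store, err := oci.New("/tmp/example-oci-layout")
		if err != nil {
			log.Fatal(err)
		}

		// ... push blobs, tag manifests, untag or delete content here ...

		// persist in-memory index changes first: per the doc comment,
		// an unsaved index would otherwise be lost by GC
		if err := store.SaveIndex(); err != nil {
			log.Fatal(err)
		}

		// remove dangling blobs and untracked files under blobs/
		if err := store.GC(ctx); err != nil {
			log.Fatal(err)
		}
	}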
From 20af59f18c0a80f3e34aa6cfc832d6f7f5434fd5 Mon Sep 17 00:00:00 2001
From: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
Date: Thu, 11 Jan 2024 09:52:03 +0800
Subject: [PATCH 25/25] resolved comments

Signed-off-by: Xiaoxuan Wang <wangxiaoxuan119@gmail.com>
---
 content/oci/oci.go | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/content/oci/oci.go b/content/oci/oci.go
index c0b19c4a..ccefc0d9 100644
--- a/content/oci/oci.go
+++ b/content/oci/oci.go
@@ -485,7 +485,7 @@ func (s *Store) GC(ctx context.Context) error {
 	reachableNodes := s.graph.DigestSet()
 
 	// clean up garbage blobs in the storage
-	rootpath := filepath.Join(s.root, "blobs")
+	rootpath := filepath.Join(s.root, ocispec.ImageBlobsDir)
 	algDirs, err := os.ReadDir(rootpath)
 	if err != nil {
 		return err
@@ -510,9 +510,8 @@ func (s *Store) GC(ctx context.Context) error {
 			}
 			dgst := digestEntry.Name()
 			blobDigest := digest.NewDigestFromEncoded(digest.Algorithm(alg), dgst)
-			err := blobDigest.Validate()
-			// skip irrelevant content
-			if err != nil {
+			if err := blobDigest.Validate(); err != nil {
+				// skip irrelevant content
 				continue
 			}
 			if !reachableNodes.Contains(blobDigest) {