drone/registry/app/pkg/filemanager/file_manager.go
Arvind Choudhary ced5ce2f65 feat: [AH-993]: Complete implementation of Upstream changes of Python Package (#3573)
* [AH-993]: Review comments fixed
* [AH-993]: Merge commit
* [AH-993]: Updated upstream creation
* [AH-993]: Cleanup
* [AH-993]: Updated messages
* [AH-993]: Merge commit
* [AH-993]: Upstream flows support for Python Packages
* [AH-993]: Updated local file
* [AH-993]: Added support for local and created arch to support different package types
* Merge branch 'main' of https://git0.harness.io/l7B_kbSEQD2wjrM7PShm5w/PROD/Harness_Commons/gitness into AH-993-upstream-implementation
* [AH-993]: temp commit
* [AH-993]: Merge commit:
* [AH-993]: temp update
2025-03-25 05:36:47 +00:00

339 lines
9.4 KiB
Go

// Copyright 2023 Harness, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package filemanager
import (
"context"
"fmt"
"io"
"mime/multipart"
"path"
"strings"
"github.com/harness/gitness/registry/app/storage"
"github.com/harness/gitness/registry/app/store"
"github.com/harness/gitness/registry/types"
"github.com/harness/gitness/store/database/dbtx"
"github.com/google/uuid"
"github.com/rs/zerolog/log"
)
const (
	// rootPathString is the path separator / root prefix used when building
	// node paths and blob-storage paths.
	rootPathString = "/"
	// tmp is the storage directory name for in-progress uploads.
	tmp = "tmp"
	// files is the storage directory name for permanent, content-addressed files.
	files = "files"
	// nodeLimit caps the number of path segments processed for a single file path.
	nodeLimit = 1000
	// pathFormat is a shared error-message suffix used with fmt.Errorf;
	// it wraps the underlying error via %w.
	pathFormat = "for path: %s, with error %w"
)
// NewFileManager wires the blob-storage app, the data-access repositories,
// and the transactor into a ready-to-use FileManager value.
func NewFileManager(
	app *App, registryDao store.RegistryRepository, genericBlobDao store.GenericBlobRepository,
	nodesDao store.NodesRepository,
	tx dbtx.Transactor,
) FileManager {
	var fm FileManager
	fm.App = app
	fm.registryDao = registryDao
	fm.genericBlobDao = genericBlobDao
	fm.nodesDao = nodesDao
	fm.tx = tx
	return fm
}
// FileManager stores and retrieves registry files as content-addressed
// generic blobs (keyed by SHA-256) plus a path-node hierarchy in the DB.
type FileManager struct {
	App            *App                        // provides per-request blob-storage context
	registryDao    store.RegistryRepository    // registry data access (held; not used by methods in this file)
	genericBlobDao store.GenericBlobRepository // generic blob rows (digests, size)
	nodesDao       store.NodesRepository       // path-node rows mapping paths to blobs
	tx             dbtx.Transactor             // DB transaction wrapper
}
// UploadFile streams the given file (or reader) into blob storage: it first
// writes to a temporary, UUID-named path, then moves the content to its
// permanent SHA-256-addressed location, records a generic-blob row, and
// finally creates the path-node hierarchy for filePath inside a transaction.
// It returns the FileInfo (filename, size, and digests) of the stored file.
func (f *FileManager) UploadFile(
	ctx context.Context,
	filePath string,
	regName string,
	regID int64,
	rootParentID int64,
	rootIdentifier string,
	file multipart.File,
	fileReader io.Reader,
	filename string,
) (types.FileInfo, error) {
	// Upload to a temporary path first so a failed or partial upload never
	// lands on the permanent, content-addressed path.
	blobContext := f.App.GetBlobsContext(ctx, regName, rootIdentifier)
	tmpPath := path.Join(rootPathString, rootIdentifier, tmp, uuid.NewString())
	fw, err := blobContext.genericBlobStore.Create(ctx, tmpPath)
	if err != nil {
		log.Error().Msgf("failed to initiate the file upload for file with"+
			" name : %s with error : %s", filename, err.Error())
		return types.FileInfo{}, fmt.Errorf("failed to initiate the file upload "+
			"for file with name : %s with error : %w", filename, err)
	}
	defer fw.Close()

	fileInfo, err := blobContext.genericBlobStore.Write(ctx, fw, file, fileReader)
	if err != nil {
		log.Error().Msgf("failed to upload the file on temporary location"+
			" with name : %s with error : %s", filename, err.Error())
		return types.FileInfo{}, fmt.Errorf("failed to upload the file on temporary "+
			"location with name : %s with error : %w", filename, err)
	}
	fileInfo.Filename = filename

	// Promote the file to its permanent, content-addressed path.
	fileStoragePath := path.Join(rootPathString, rootIdentifier, files, fileInfo.Sha256)
	if err = blobContext.genericBlobStore.Move(ctx, tmpPath, fileStoragePath); err != nil {
		log.Error().Msgf("failed to Move the file on permanent location "+
			"with name : %s with error : %s", filename, err.Error())
		return types.FileInfo{}, fmt.Errorf("failed to Move the file on permanent"+
			" location with name : %s with error : %w", filename, err)
	}

	// Persist blob metadata (digests + size) in the generic blobs table.
	gb := &types.GenericBlob{
		RootParentID: rootParentID,
		Sha1:         fileInfo.Sha1,
		Sha256:       fileInfo.Sha256,
		Sha512:       fileInfo.Sha512,
		MD5:          fileInfo.MD5,
		Size:         fileInfo.Size,
	}
	if err = f.genericBlobDao.Create(ctx, gb); err != nil {
		log.Error().Msgf("failed to save generic blob in db with "+
			"sha256 : %s, err: %s", fileInfo.Sha256, err.Error())
		return types.FileInfo{}, fmt.Errorf("failed to save generic blob"+
			" in db with sha256 : %s, err: %w", fileInfo.Sha256, err)
	}

	// Create the node hierarchy transactionally so a partial tree is never
	// persisted.
	err = f.tx.WithTx(ctx, func(ctx context.Context) error {
		return f.createNodes(ctx, filePath, gb.ID, regID)
	})
	if err != nil {
		log.Error().Msgf("failed to save nodes for file : %s, with "+
			"path : %s, err: %s", filename, filePath, err)
		return types.FileInfo{}, fmt.Errorf("failed to save nodes for"+
			" file : %s, with path : %s, err: %w", filename, filePath, err)
	}
	return fileInfo, nil
}
// createNodes splits filePath on "/" and creates one node per path segment,
// chaining each node to its parent. Only the final segment is marked as a
// file and linked to blobID; earlier segments become directory nodes with no
// blob reference. At most nodeLimit segments are processed.
func (f *FileManager) createNodes(ctx context.Context, filePath string, blobID string, regID int64) error {
	segments := strings.Split(filePath, rootPathString)
	parentID := ""
	nodePath := ""
	for i, segment := range segments {
		if i >= nodeLimit { // defensive cap on path depth
			break
		}
		if segment == "" {
			continue // skip empty segments (leading slash, doubled slashes)
		}
		nodePath += rootPathString + segment
		// Only the leaf node carries the blob reference.
		isFile := i == len(segments)-1
		segmentBlobID := ""
		if isFile {
			segmentBlobID = blobID
		}
		nodeID, err := f.SaveNode(ctx, filePath, segmentBlobID, regID, segment,
			parentID, nodePath, isFile)
		if err != nil {
			return err
		}
		parentID = nodeID
	}
	return nil
}
// SaveNode persists a single node (file or directory) for the given registry
// and returns the new node's ID. blobID may be empty for directory nodes.
// filePath is the full path of the file being uploaded and is used only for
// error reporting.
func (f *FileManager) SaveNode(
	ctx context.Context, filePath string, blobID string, regID int64, segment string,
	parentID string, nodePath string, isFile bool,
) (string, error) {
	node := &types.Node{
		Name:         segment,
		RegistryID:   regID,
		ParentNodeID: parentID,
		IsFile:       isFile,
		NodePath:     nodePath,
		BlobID:       blobID,
	}
	err := f.nodesDao.Create(ctx, node)
	if err != nil {
		// Wrap the underlying error (the original dropped it entirely).
		return "", fmt.Errorf("failed to create the node: %s, "+
			pathFormat, segment, filePath, err)
	}
	return node.ID, nil
}
// DownloadFile resolves filePath within the registry to its backing blob and
// returns a reader over the blob content (or a redirect URL when the storage
// backend serves the content directly) along with the blob size.
func (f *FileManager) DownloadFile(
	ctx context.Context,
	filePath string,
	regInfo types.Registry,
	rootIdentifier string,
) (fileReader *storage.FileReader, size int64, redirectURL string, err error) {
	node, err := f.nodesDao.GetByPathAndRegistryID(ctx, regInfo.ID, filePath)
	if err != nil {
		return nil, 0, "", fmt.Errorf("failed to get the file for path: %s, "+
			"with registry: %s, with error %w", filePath, regInfo.Name, err)
	}
	blob, err := f.genericBlobDao.FindByID(ctx, node.BlobID)
	if err != nil {
		// Use node.BlobID here: blob may be nil when FindByID fails.
		return nil, 0, "", fmt.Errorf("failed to get the blob for path: %s, "+
			"with blob id: %s, with error %w", filePath, node.BlobID, err)
	}
	// Permanent blob location is content-addressed by SHA-256.
	completeFilePath := path.Join(rootPathString, rootIdentifier, files, blob.Sha256)
	blobContext := f.App.GetBlobsContext(ctx, regInfo.Name, rootIdentifier)
	reader, redirectURL, err := blobContext.genericBlobStore.Get(ctx, completeFilePath, blob.Size)
	if err != nil {
		return nil, 0, "", fmt.Errorf("failed to get the file for path: %s, "+
			" with error %w", completeFilePath, err)
	}
	// redirectURL is empty when the backend streams the content itself.
	return reader, blob.Size, redirectURL, nil
}
// DeleteFile is currently a placeholder: it only logs the requested path and
// registry ID and always returns nil without deleting anything.
func (f *FileManager) DeleteFile(
	ctx context.Context,
	filePath string,
	regID int,
) error {
	logger := log.Ctx(ctx)
	logger.Info().Msgf("%s%d", filePath, regID)
	return nil
}
// HeadFile resolves filePath within the registry and returns the SHA-256
// digest of the backing blob without reading the file content.
func (f *FileManager) HeadFile(
	ctx context.Context,
	filePath string,
	regID int64,
) (string, error) {
	node, nodeErr := f.nodesDao.GetByPathAndRegistryID(ctx, regID, filePath)
	if nodeErr != nil {
		return "", fmt.Errorf("failed to get the node path mapping for path: %s, "+
			"with error %w", filePath, nodeErr)
	}
	blob, blobErr := f.genericBlobDao.FindByID(ctx, node.BlobID)
	if blobErr != nil {
		return "", fmt.Errorf("failed to get the blob for path: %s, with blob id: %s,"+
			" with error %w", filePath, node.BlobID, blobErr)
	}
	return blob.Sha256, nil
}
// GetFileMetadata resolves filePath within the registry and returns the
// stored metadata (digests, size, and filename) of the backing blob.
func (f *FileManager) GetFileMetadata(
	ctx context.Context,
	filePath string,
	regID int64,
) (types.FileInfo, error) {
	node, err := f.nodesDao.GetByPathAndRegistryID(ctx, regID, filePath)
	if err != nil {
		return types.FileInfo{}, fmt.Errorf("failed to get the node path mapping "+
			pathFormat, filePath, err)
	}
	blob, err := f.genericBlobDao.FindByID(ctx, node.BlobID)
	if err != nil {
		// Wrap with %w so callers can unwrap the underlying error.
		return types.FileInfo{}, fmt.Errorf("failed to get the blob for path: %s, "+
			"with blob id: %s, with error %w", filePath, node.BlobID, err)
	}
	return types.FileInfo{
		Sha1:     blob.Sha1,
		Size:     blob.Size,
		Sha256:   blob.Sha256,
		Sha512:   blob.Sha512,
		MD5:      blob.MD5,
		Filename: node.Name,
	}, nil
}
// DeleteFileByRegistryID removes every node belonging to the registry
// identified by regID. regName is used only for error reporting.
func (f *FileManager) DeleteFileByRegistryID(
	ctx context.Context,
	regID int64,
	regName string,
) error {
	if err := f.nodesDao.DeleteByRegistryID(ctx, regID); err != nil {
		return fmt.Errorf("failed to delete all the files for registry with name: %s, with error %w", regName, err)
	}
	return nil
}
// GetFilesMetadata lists file-node metadata under filePath for the registry,
// applying the given sort field/order, pagination (limit/offset), and search
// filter. On error it returns nil (not a partially-populated result).
func (f *FileManager) GetFilesMetadata(
	ctx context.Context,
	filePath string,
	regID int64,
	sortByField string,
	sortByOrder string,
	limit int,
	offset int,
	search string,
) (*[]types.FileNodeMetadata, error) {
	nodes, err := f.nodesDao.GetFilesMetadataByPathAndRegistryID(ctx, regID, filePath,
		sortByField,
		sortByOrder,
		limit,
		offset,
		search)
	if err != nil {
		// Return nil on failure; the previous non-nil empty result alongside
		// an error invited callers to ignore the error.
		return nil, fmt.Errorf("failed to get the files "+
			pathFormat, filePath, err)
	}
	return nodes, nil
}
// CountFilesByPath returns the number of files stored under filePath for the
// registry. On error it returns -1 and a wrapped error.
func (f *FileManager) CountFilesByPath(
	ctx context.Context,
	filePath string,
	regID int64,
) (int64, error) {
	count, err := f.nodesDao.CountByPathAndRegistryID(ctx, regID, filePath)
	if err != nil {
		// Note the trailing space before pathFormat: the original produced
		// "...count of filesfor path...".
		return -1, fmt.Errorf("failed to get the count of files "+
			pathFormat, filePath, err)
	}
	return count, nil
}