mirror of
https://github.com/harness/drone.git
synced 2025-05-03 14:40:53 +08:00

* fix * Merge branch 'main' into AH-307-plus-url-support-2_no_rbac * resolve PR comments * resolve PR comments * resolve PR comments * feat: [AH-346]: new api changes for version list and digest list (#2726) * feat: [AH-346]: new api changes for version list and digest list * resolve pr comments * resolve pr comments * feat: [AH-346]: new api yaml integration (#2716) * feat: [AH-346]: new api yaml integration * Merge branch 'main' of https://git0.harness.io/l7B_kbSEQD2wjrM7PShm5w/PROD/Harness_Commons/gitness into AH-307-plus-url-support-2_no_rbac * fix wire check * fix lint issues * fix: [AH-357]: migrations * changes for global artifact listing (#2708) * changes for global artifact listing * Merge branch 'main' into AH-307-plus-url-support-2_no_rbac * merged main * Merge branch 'main' into AH-307-plus-url-support-2_no_rbac * [AH-307]: Updated lint * fix comment * add new method to spacestore * feat: [AH-307]: fix after rebase with main * [AH-307]: Removing comments * [AH-307]: linting fixes * feat: [AH-286]: define proto, interface and no-op reporter implementation to publish artifact events (#2657) * feat: [AH-286]: publish artifact event - no row found is not error * feat: [AH-286]: publish artifact event - no row found is not error * feat: [AH-286]: publish artifact event - lint errors, move publishing event outside DB transaction * feat: [AH-286]: publish artifact event - review comments * feat: [AH-286]: publish artifact event - address review comments * feat: [AH-286]: publish artifact event - keep payload generic * feat: [AH-286]: publish artifact event - as sqlite locks DB, perform db operation outside goroutine publishing of events * feat: [AH-286]: publish artifact event - make publishing event async * feat: [AH-286]: publish artifact event - use api types * feat: [AH-286]: Publish event for SSCA to trigger scans - no need to export spacePathStore * feat: [AH-286]: Publish event for SSCA to trigger scans - send spacePath instead of parentID * feat: 
[AH-286]: Publish event for SSCA to trigger scans - rename scanner as generic reporter * feat: [AH-286]: Publish event for SSCA to trigger scans - rename scanner as generic reporter * feat: [AH-286]: publish artifact event - reuse redis.Send() * feat: [AH-286]: Publish event for SSCA to trigger scans - review comments * feat: [AH-286]: Publish event for SSCA to trigger scans - remove unused interface * feat: [AH-286]: Publish event for SSCA to trigger scans - update msg format * feat: [AH-286]: define proto, interface and no-op reporter implementation to publish artifact events * feat: [AH-286]: Publish event for SSCA to trigger scans - extract acctID/orgID/projectID from spacepathStore * feat: [AH-286]: publish artifact event - remove protobuf reference, fix lint errors * feat: [AH-286]: publish artifact event - fix msg format * feat: [AH-286]: define proto, interface and no-op reporter implementation to publish artifact events * feat: [AH-286]: define proto, interface and no-op reporter implementation to publish artifact events * feat: [AH-321]: make repo form disabled for rbac (#2687) * feat: [AH-321]: make repo form disabled for rbac * fix wire-gen * GC refactoring * feat: [AH-340]: update UI as per the product feedbacks (#2685) * feat: [AH-340]: update UI as per the product feedbacks * feat: [AH-44]: add module data while redirecting to pipeline execution page * feat: [AH-44]: add build pipeline details in overview cards * feat: [AH-44]: update view for prod and non prod tag * feat: [AH-44]: rearrange filters on artifact list apge * feat: [AH-10]: add schema for overview cards, update artifact list, add ai search input, update api for registry artifact list and update mapping for deployments table * feat: [AH-307]: add secretSpacePath in upstream password field while sending to BE (#2631) * feat: [AH-307]: add secretSpacePath in upstream password field while sending to BE * feat: [AH-299]: support new changes for artifact list page (#2630) * feat: update har 
service api version * feat: [AH-30]: integrate API schema for deployments list content * feat: [AH-300]: update tag colors for prod and non prod tags * feat: [AH-300]: Add Deployments table in artiface version details page * feat: [AH-299]: support new changes for artifact list page * feat: [AH-299]: support new changes for artifact list page * feat: [AH-321]: support artifact registry rbac permission on UI (#2671) * feat: [AH-321]: support artifact registry rbac permission on UI * enable rbac (#2664) * fix scope * enable rbac * feat: [AH-307]: hide code tab from version details page for both docker and helm * feat: [AH-240]: add custom handling for enterprise auth type field * Merge branch 'AH-307-plus-url-support-2_no_rbac' of https://git0.harness.io/l7B_kbSEQD2wjrM7PShm5w/PROD/Harness_Commons/gitness into AH-307-plus-url-support-2_no_rbac * feat: [AH-307]: send space_ref in query param while creating registries * lowercase rootRef * [AH-307]: updated route * [AH-307]: Added logs * [AH-307]: Added logs * feat: [AH-317]: add space_ref query param * local * Merge commit * Merge commit * Merge commit * Added comments * Revert changes * Merge commit * Merge branch 'main' of https://git0.harness.io/l7B_kbSEQD2wjrM7PShm5w/PROD/Harness_Commons/gitness into AH-307-plus-url-support-2 * Merge branch 'AH-306d' of https://git0.harness.io/l7B_kbSEQD2wjrM7PShm5w/PROD/Harness_Commons/gitness into AH-307-plus-url-support-2 * fix space path handling * Merge branch 'main' of https://git0.harness.io/l7B_kbSEQD2wjrM7PShm5w/PROD/Harness_Commons/gitness into AH-307-plus-url-support-2 * Updated URLs to support slashes with + separator * fix: [AH-306c]: fix anonymous flow * fix: [AH-306c]: fix anonymous flow * feat: [AH-307]: plus url support on UI (cherry picked from commit 3fb6add3ce03498b6668b5f8f6d547e1acedaec4) * [AH-307]: Added examples (cherry picked from commit e83e41303da536f421be333be04aed09fbf75f5f) * [AH-307]: Added Regex request rewrite support (cherry picked from commit 
ed7b155256bdcd1134bc228b5705556a1233add6) * fix: [AH-306c]: fix anonymous flow
455 lines
11 KiB
Go
455 lines
11 KiB
Go
// Source: https://github.com/distribution/distribution
|
|
|
|
// Copyright 2014 https://github.com/distribution/distribution Authors
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package filesystem
|
|
|
|
import (
|
|
"bufio"
|
|
"bytes"
|
|
"context"
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"os"
|
|
"path"
|
|
"time"
|
|
|
|
storagedriver "github.com/harness/gitness/registry/app/driver"
|
|
"github.com/harness/gitness/registry/app/driver/base"
|
|
"github.com/harness/gitness/registry/app/driver/factory"
|
|
|
|
"github.com/rs/zerolog/log"
|
|
)
|
|
|
|
// Driver registration name and configuration defaults.
const (
	// driverName is the key under which this driver registers with the factory.
	driverName = "filesystem"
	// defaultRootDirectory is used when no "rootdirectory" parameter is supplied.
	defaultRootDirectory = "/var/lib/registry"
	// defaultMaxThreads bounds concurrent filesystem operations when the
	// "maxthreads" parameter is absent.
	defaultMaxThreads = uint64(100)

	// minThreads is the minimum value for the maxthreads configuration
	// parameter. If the driver's parameters are less than this we set
	// the parameters to minThreads.
	minThreads = uint64(25)
)
|
|
|
|
// GetDriverName returns the name under which this driver is registered
// with the storage driver factory ("filesystem").
func GetDriverName() string {
	return driverName
}
|
|
|
|
// DriverParameters represents all configuration options available for the
// filesystem driver.
type DriverParameters struct {
	// RootDirectory is the local directory under which all content is stored.
	RootDirectory string
	// MaxThreads bounds the number of concurrent filesystem operations.
	MaxThreads uint64
}
|
|
|
|
// TODO: figure-out why init is not called automatically
// Register exists so callers can force this package to be linked in;
// importing the package triggers init(), which performs the actual factory
// registration. This function itself only logs.
func Register() {
	log.Info().Msgf("registering filesystem driver")
}
|
|
|
|
// init registers the filesystem driver with the storage driver factory.
func init() {
	factory.Register(driverName, &filesystemDriverFactory{})
}
|
|
|
|
// filesystemDriverFactory implements the factory.StorageDriverFactory interface.
|
|
type filesystemDriverFactory struct{}
|
|
|
|
func (factory *filesystemDriverFactory) Create(parameters map[string]interface{}) (storagedriver.StorageDriver, error) {
|
|
return FromParameters(parameters)
|
|
}
|
|
|
|
// driver is the internal filesystem-backed implementation; every operation
// resolves its path relative to rootDirectory (see fullPath).
type driver struct {
	rootDirectory string
}
|
|
|
|
// baseEmbed lets Driver embed base.Base without exposing it as a public
// field on the exported type.
type baseEmbed struct {
	base.Base
}
|
|
|
|
// Driver is a storagedriver.StorageDriver implementation backed by a local
// filesystem. All provided paths will be subpaths of the RootDirectory.
type Driver struct {
	baseEmbed
}
|
|
|
|
// FromParameters constructs a new Driver with a given parameters map
|
|
// Optional Parameters:
|
|
// - rootdirectory
|
|
// - maxthreads.
|
|
func FromParameters(parameters map[string]interface{}) (*Driver, error) {
|
|
params, err := fromParametersImpl(parameters)
|
|
if err != nil || params == nil {
|
|
return nil, err
|
|
}
|
|
return New(*params), nil
|
|
}
|
|
|
|
func fromParametersImpl(parameters map[string]interface{}) (*DriverParameters, error) {
|
|
var (
|
|
err error
|
|
maxThreads = defaultMaxThreads
|
|
rootDirectory = defaultRootDirectory
|
|
)
|
|
|
|
if parameters != nil {
|
|
if rootDir, ok := parameters["rootdirectory"]; ok {
|
|
rootDirectory = fmt.Sprint(rootDir)
|
|
}
|
|
|
|
maxThreads, err = base.GetLimitFromParameter(parameters["maxthreads"], minThreads, defaultMaxThreads)
|
|
if err != nil {
|
|
return nil, fmt.Errorf("maxthreads config error: %s", err.Error())
|
|
}
|
|
}
|
|
|
|
params := &DriverParameters{
|
|
RootDirectory: rootDirectory,
|
|
MaxThreads: maxThreads,
|
|
}
|
|
return params, nil
|
|
}
|
|
|
|
// New constructs a new Driver with a given rootDirectory.
|
|
func New(params DriverParameters) *Driver {
|
|
fsDriver := &driver{rootDirectory: params.RootDirectory}
|
|
|
|
return &Driver{
|
|
baseEmbed: baseEmbed{
|
|
Base: base.Base{
|
|
StorageDriver: base.NewRegulator(fsDriver, params.MaxThreads),
|
|
},
|
|
},
|
|
}
|
|
}
|
|
|
|
// Implement the storagedriver.StorageDriver interface

// Name returns the driver's registered name, "filesystem".
func (d *driver) Name() string {
	return driverName
}
|
|
|
|
// GetContent retrieves the content stored at "path" as a []byte.
|
|
func (d *driver) GetContent(ctx context.Context, path string) ([]byte, error) {
|
|
rc, err := d.Reader(ctx, path, 0)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
defer rc.Close()
|
|
|
|
p, err := io.ReadAll(rc)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return p, nil
|
|
}
|
|
|
|
// PutContent stores the []byte content at a location designated by "path".
|
|
func (d *driver) PutContent(ctx context.Context, subPath string, contents []byte) error {
|
|
writer, err := d.Writer(ctx, subPath, false)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
defer writer.Close()
|
|
_, err = io.Copy(writer, bytes.NewReader(contents))
|
|
if err != nil {
|
|
if cErr := writer.Cancel(ctx); cErr != nil {
|
|
return errors.Join(err, cErr)
|
|
}
|
|
return err
|
|
}
|
|
return writer.Commit(ctx)
|
|
}
|
|
|
|
// Reader retrieves an io.ReadCloser for the content stored at "path" with a
|
|
// given byte offset.
|
|
func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {
|
|
file, err := os.OpenFile(d.fullPath(path), os.O_RDONLY, 0o644)
|
|
log.Ctx(ctx).Info().Msgf("Opening file %s %s", d.fullPath(path), d.rootDirectory)
|
|
if err != nil {
|
|
if os.IsNotExist(err) {
|
|
return nil, storagedriver.PathNotFoundError{Path: path}
|
|
}
|
|
|
|
return nil, err
|
|
}
|
|
|
|
seekPos, err := file.Seek(offset, io.SeekStart)
|
|
if err != nil {
|
|
file.Close()
|
|
return nil, err
|
|
} else if seekPos < offset {
|
|
file.Close()
|
|
return nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset}
|
|
}
|
|
|
|
return file, nil
|
|
}
|
|
|
|
func (d *driver) Writer(_ context.Context, subPath string, appendFlag bool) (storagedriver.FileWriter, error) {
|
|
fullPath := d.fullPath(subPath)
|
|
parentDir := path.Dir(fullPath)
|
|
if err := os.MkdirAll(parentDir, 0o777); err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
fp, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE, 0o666)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
var offset int64
|
|
|
|
if !appendFlag {
|
|
err := fp.Truncate(0)
|
|
if err != nil {
|
|
fp.Close()
|
|
return nil, err
|
|
}
|
|
} else {
|
|
n, err := fp.Seek(0, io.SeekEnd)
|
|
if err != nil {
|
|
fp.Close()
|
|
return nil, err
|
|
}
|
|
offset = n
|
|
}
|
|
|
|
return newFileWriter(fp, offset), nil
|
|
}
|
|
|
|
// Stat retrieves the FileInfo for the given path, including the current size
|
|
// in bytes and the creation time.
|
|
func (d *driver) Stat(_ context.Context, subPath string) (storagedriver.FileInfo, error) {
|
|
fullPath := d.fullPath(subPath)
|
|
|
|
fi, err := os.Stat(fullPath)
|
|
if err != nil {
|
|
if os.IsNotExist(err) {
|
|
return nil, storagedriver.PathNotFoundError{Path: subPath}
|
|
}
|
|
|
|
return nil, err
|
|
}
|
|
|
|
return fileInfo{
|
|
path: subPath,
|
|
FileInfo: fi,
|
|
}, nil
|
|
}
|
|
|
|
// List returns a list of the objects that are direct descendants of the given
|
|
// path.
|
|
func (d *driver) List(_ context.Context, subPath string) ([]string, error) {
|
|
fullPath := d.fullPath(subPath)
|
|
|
|
dir, err := os.Open(fullPath)
|
|
if err != nil {
|
|
if os.IsNotExist(err) {
|
|
return nil, storagedriver.PathNotFoundError{Path: subPath}
|
|
}
|
|
return nil, err
|
|
}
|
|
|
|
defer dir.Close()
|
|
|
|
fileNames, err := dir.Readdirnames(0)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
keys := make([]string, 0, len(fileNames))
|
|
for _, fileName := range fileNames {
|
|
keys = append(keys, path.Join(subPath, fileName))
|
|
}
|
|
|
|
return keys, nil
|
|
}
|
|
|
|
// Move moves an object stored at sourcePath to destPath, removing the original
|
|
// object.
|
|
func (d *driver) Move(_ context.Context, sourcePath string, destPath string) error {
|
|
source := d.fullPath(sourcePath)
|
|
dest := d.fullPath(destPath)
|
|
|
|
if _, err := os.Stat(source); os.IsNotExist(err) {
|
|
return storagedriver.PathNotFoundError{Path: sourcePath}
|
|
}
|
|
|
|
if err := os.MkdirAll(path.Dir(dest), 0o777); err != nil {
|
|
return err
|
|
}
|
|
|
|
err := os.Rename(source, dest)
|
|
return err
|
|
}
|
|
|
|
// Delete recursively deletes all objects stored at "path" and its subpaths.
|
|
func (d *driver) Delete(_ context.Context, subPath string) error {
|
|
fullPath := d.fullPath(subPath)
|
|
|
|
_, err := os.Stat(fullPath)
|
|
if err != nil && !os.IsNotExist(err) {
|
|
return err
|
|
} else if err != nil {
|
|
return storagedriver.PathNotFoundError{Path: subPath}
|
|
}
|
|
|
|
err = os.RemoveAll(fullPath)
|
|
return err
|
|
}
|
|
|
|
// RedirectURL returns a URL which may be used to retrieve the content stored at the given path.
// The filesystem driver has no redirect mechanism, so it always returns
// ("", nil); callers must serve the content directly.
func (d *driver) RedirectURL(_ context.Context, _ string, _ string) (string, error) {
	return "", nil
}
|
|
|
|
// Walk traverses a filesystem defined within driver, starting
// from the given path, calling f on each file and directory.
// It delegates to the generic fallback traversal implemented by the
// storagedriver package, passing this driver as the backend.
func (d *driver) Walk(
	ctx context.Context,
	path string,
	f storagedriver.WalkFn,
	options ...func(*storagedriver.WalkOptions),
) error {
	return storagedriver.WalkFallback(ctx, d, path, f, options...)
}
|
|
|
|
// fullPath returns the absolute path of a key within the Driver's storage.
// It joins the configured root directory with the driver-relative subPath.
func (d *driver) fullPath(subPath string) string {
	return path.Join(d.rootDirectory, subPath)
}
|
|
|
|
// fileInfo adapts an os.FileInfo to the storagedriver.FileInfo interface,
// carrying the driver-relative path alongside the OS metadata.
type fileInfo struct {
	os.FileInfo
	// path is the subpath as passed to Stat, not the absolute OS path.
	path string
}

// Compile-time check that fileInfo satisfies storagedriver.FileInfo.
var _ storagedriver.FileInfo = fileInfo{}
|
|
|
|
// Path provides the full path of the target of this file info.
// This is the driver-relative path captured at Stat time.
func (fi fileInfo) Path() string {
	return fi.path
}
|
|
|
|
// Size returns current length in bytes of the file. The return value can
|
|
// be used to write to the end of the file at path. The value is
|
|
// meaningless if IsDir returns true.
|
|
func (fi fileInfo) Size() int64 {
|
|
if fi.IsDir() {
|
|
return 0
|
|
}
|
|
|
|
return fi.FileInfo.Size()
|
|
}
|
|
|
|
// ModTime returns the modification time for the file. For backends that
// don't have a modification time, the creation time should be returned.
func (fi fileInfo) ModTime() time.Time {
	return fi.FileInfo.ModTime()
}
|
|
|
|
// IsDir returns true if the path is a directory.
// Delegates directly to the embedded os.FileInfo.
func (fi fileInfo) IsDir() bool {
	return fi.FileInfo.IsDir()
}
|
|
|
|
// fileWriter implements a buffered file-backed writer with an explicit
// lifecycle: Write until either Commit (flush+sync, keep file) or Cancel
// (close+remove), then Close.
type fileWriter struct {
	file *os.File
	// size is the starting offset plus the number of bytes written so far.
	size int64
	// bw buffers writes to file; it must be flushed before sync/close.
	bw *bufio.Writer
	// closed, committed and cancelled are one-way flags guarding the
	// writer's lifecycle transitions.
	closed bool
	committed bool
	cancelled bool
}
|
|
|
|
func newFileWriter(file *os.File, size int64) *fileWriter {
|
|
return &fileWriter{
|
|
file: file,
|
|
size: size,
|
|
bw: bufio.NewWriter(file),
|
|
}
|
|
}
|
|
|
|
func (fw *fileWriter) Write(p []byte) (int, error) {
|
|
if fw.closed {
|
|
return 0, fmt.Errorf("already closed")
|
|
} else if fw.committed {
|
|
return 0, fmt.Errorf("already committed")
|
|
} else if fw.cancelled {
|
|
return 0, fmt.Errorf("already cancelled")
|
|
}
|
|
n, err := fw.bw.Write(p)
|
|
fw.size += int64(n)
|
|
return n, err
|
|
}
|
|
|
|
// Size returns the logical size of the write: the starting offset plus all
// bytes written so far (including data still sitting in the buffer).
func (fw *fileWriter) Size() int64 {
	return fw.size
}
|
|
|
|
func (fw *fileWriter) Close() error {
|
|
if fw.closed {
|
|
return fmt.Errorf("already closed")
|
|
}
|
|
|
|
if err := fw.bw.Flush(); err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := fw.file.Sync(); err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := fw.file.Close(); err != nil {
|
|
return err
|
|
}
|
|
fw.closed = true
|
|
return nil
|
|
}
|
|
|
|
func (fw *fileWriter) Cancel(_ context.Context) error {
|
|
if fw.closed {
|
|
return fmt.Errorf("already closed")
|
|
}
|
|
|
|
fw.cancelled = true
|
|
fw.file.Close()
|
|
return os.Remove(fw.file.Name())
|
|
}
|
|
|
|
func (fw *fileWriter) Commit(_ context.Context) error {
|
|
if fw.closed {
|
|
return fmt.Errorf("already closed")
|
|
} else if fw.committed {
|
|
return fmt.Errorf("already committed")
|
|
} else if fw.cancelled {
|
|
return fmt.Errorf("already cancelled")
|
|
}
|
|
|
|
if err := fw.bw.Flush(); err != nil {
|
|
return err
|
|
}
|
|
|
|
if err := fw.file.Sync(); err != nil {
|
|
return err
|
|
}
|
|
|
|
fw.committed = true
|
|
return nil
|
|
}
|