Move some code into models/git (#19879)
* Move access and repo permission to models/perm/access
* fix test
* Move some git related files into sub package models/git
* Fix build
* fix git test
* move lfs to sub package
* move more git related functions to models/git
* Move functions sequence
* Some improvements per @KN4CK3R and @delvh
parent a9dc9b06e4
commit 110fc57cbc
67 changed files with 549 additions and 495 deletions
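For downstream callers, the visible effect of this refactor is the new import path: branch, commit-status and LFS models now live under code.gitea.io/gitea/models/git. A minimal, illustrative sketch of the relocated call style, using the same import alias the tests in this commit use (the wrapper function is hypothetical):

package example

import (
	git_model "code.gitea.io/gitea/models/git"
)

// branchIsProtected is an illustrative wrapper: IsProtectedBranch is now
// provided by models/git rather than the root models package.
func branchIsProtected(repoID int64, branch string) (bool, error) {
	return git_model.IsProtectedBranch(repoID, branch)
}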
603 models/git/branches.go Normal file
@@ -0,0 +1,603 @@
// Copyright 2016 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package git
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"code.gitea.io/gitea/models/db"
|
||||
"code.gitea.io/gitea/models/organization"
|
||||
"code.gitea.io/gitea/models/perm"
|
||||
access_model "code.gitea.io/gitea/models/perm/access"
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
"code.gitea.io/gitea/models/unit"
|
||||
user_model "code.gitea.io/gitea/models/user"
|
||||
"code.gitea.io/gitea/modules/base"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
"code.gitea.io/gitea/modules/util"
|
||||
|
||||
"github.com/gobwas/glob"
|
||||
)
|
||||
|
||||
// ProtectedBranch struct
|
||||
type ProtectedBranch struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
RepoID int64 `xorm:"UNIQUE(s)"`
|
||||
BranchName string `xorm:"UNIQUE(s)"`
|
||||
CanPush bool `xorm:"NOT NULL DEFAULT false"`
|
||||
EnableWhitelist bool
|
||||
WhitelistUserIDs []int64 `xorm:"JSON TEXT"`
|
||||
WhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
|
||||
EnableMergeWhitelist bool `xorm:"NOT NULL DEFAULT false"`
|
||||
WhitelistDeployKeys bool `xorm:"NOT NULL DEFAULT false"`
|
||||
MergeWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
|
||||
MergeWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
|
||||
EnableStatusCheck bool `xorm:"NOT NULL DEFAULT false"`
|
||||
StatusCheckContexts []string `xorm:"JSON TEXT"`
|
||||
EnableApprovalsWhitelist bool `xorm:"NOT NULL DEFAULT false"`
|
||||
ApprovalsWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
|
||||
ApprovalsWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`
|
||||
RequiredApprovals int64 `xorm:"NOT NULL DEFAULT 0"`
|
||||
BlockOnRejectedReviews bool `xorm:"NOT NULL DEFAULT false"`
|
||||
BlockOnOfficialReviewRequests bool `xorm:"NOT NULL DEFAULT false"`
|
||||
BlockOnOutdatedBranch bool `xorm:"NOT NULL DEFAULT false"`
|
||||
DismissStaleApprovals bool `xorm:"NOT NULL DEFAULT false"`
|
||||
RequireSignedCommits bool `xorm:"NOT NULL DEFAULT false"`
|
||||
ProtectedFilePatterns string `xorm:"TEXT"`
|
||||
UnprotectedFilePatterns string `xorm:"TEXT"`
|
||||
|
||||
CreatedUnix timeutil.TimeStamp `xorm:"created"`
|
||||
UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
db.RegisterModel(new(ProtectedBranch))
|
||||
db.RegisterModel(new(DeletedBranch))
|
||||
db.RegisterModel(new(RenamedBranch))
|
||||
}
|
||||
|
||||
// IsProtected returns whether the branch is protected
|
||||
func (protectBranch *ProtectedBranch) IsProtected() bool {
|
||||
return protectBranch.ID > 0
|
||||
}
|
||||
|
||||
// CanUserPush returns whether the given user can push to this protected branch
|
||||
func (protectBranch *ProtectedBranch) CanUserPush(userID int64) bool {
|
||||
if !protectBranch.CanPush {
|
||||
return false
|
||||
}
|
||||
|
||||
if !protectBranch.EnableWhitelist {
|
||||
if user, err := user_model.GetUserByID(userID); err != nil {
|
||||
log.Error("GetUserByID: %v", err)
|
||||
return false
|
||||
} else if repo, err := repo_model.GetRepositoryByID(protectBranch.RepoID); err != nil {
|
||||
log.Error("repo_model.GetRepositoryByID: %v", err)
|
||||
return false
|
||||
} else if writeAccess, err := access_model.HasAccessUnit(db.DefaultContext, user, repo, unit.TypeCode, perm.AccessModeWrite); err != nil {
|
||||
log.Error("HasAccessUnit: %v", err)
|
||||
return false
|
||||
} else {
|
||||
return writeAccess
|
||||
}
|
||||
}
|
||||
|
||||
if base.Int64sContains(protectBranch.WhitelistUserIDs, userID) {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(protectBranch.WhitelistTeamIDs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
in, err := organization.IsUserInTeams(db.DefaultContext, userID, protectBranch.WhitelistTeamIDs)
|
||||
if err != nil {
|
||||
log.Error("IsUserInTeams: %v", err)
|
||||
return false
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
// IsUserMergeWhitelisted checks if some user is whitelisted to merge to this branch
|
||||
func IsUserMergeWhitelisted(ctx context.Context, protectBranch *ProtectedBranch, userID int64, permissionInRepo access_model.Permission) bool {
|
||||
if !protectBranch.EnableMergeWhitelist {
|
||||
// Then we need to fall back on whether the user has write permission
|
||||
return permissionInRepo.CanWrite(unit.TypeCode)
|
||||
}
|
||||
|
||||
if base.Int64sContains(protectBranch.MergeWhitelistUserIDs, userID) {
|
||||
return true
|
||||
}
|
||||
|
||||
if len(protectBranch.MergeWhitelistTeamIDs) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
in, err := organization.IsUserInTeams(ctx, userID, protectBranch.MergeWhitelistTeamIDs)
|
||||
if err != nil {
|
||||
log.Error("IsUserInTeams: %v", err)
|
||||
return false
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
// IsUserOfficialReviewer checks whether the user is an official reviewer for the branch (counts towards required approvals)
|
||||
func IsUserOfficialReviewer(protectBranch *ProtectedBranch, user *user_model.User) (bool, error) {
|
||||
return IsUserOfficialReviewerCtx(db.DefaultContext, protectBranch, user)
|
||||
}
|
||||
|
||||
// IsUserOfficialReviewerCtx checks whether the user is an official reviewer for the branch (counts towards required approvals)
|
||||
func IsUserOfficialReviewerCtx(ctx context.Context, protectBranch *ProtectedBranch, user *user_model.User) (bool, error) {
|
||||
repo, err := repo_model.GetRepositoryByIDCtx(ctx, protectBranch.RepoID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !protectBranch.EnableApprovalsWhitelist {
|
||||
// Anyone with write access is considered official reviewer
|
||||
writeAccess, err := access_model.HasAccessUnit(ctx, user, repo, unit.TypeCode, perm.AccessModeWrite)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return writeAccess, nil
|
||||
}
|
||||
|
||||
if base.Int64sContains(protectBranch.ApprovalsWhitelistUserIDs, user.ID) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
inTeam, err := organization.IsUserInTeams(ctx, user.ID, protectBranch.ApprovalsWhitelistTeamIDs)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return inTeam, nil
|
||||
}
|
||||
|
||||
// GetProtectedFilePatterns parses a semicolon separated list of protected file patterns and returns a glob.Glob slice
|
||||
func (protectBranch *ProtectedBranch) GetProtectedFilePatterns() []glob.Glob {
|
||||
return getFilePatterns(protectBranch.ProtectedFilePatterns)
|
||||
}
|
||||
|
||||
// GetUnprotectedFilePatterns parses a semicolon separated list of unprotected file patterns and returns a glob.Glob slice
|
||||
func (protectBranch *ProtectedBranch) GetUnprotectedFilePatterns() []glob.Glob {
|
||||
return getFilePatterns(protectBranch.UnprotectedFilePatterns)
|
||||
}
|
||||
|
||||
func getFilePatterns(filePatterns string) []glob.Glob {
|
||||
extarr := make([]glob.Glob, 0, 10)
|
||||
for _, expr := range strings.Split(strings.ToLower(filePatterns), ";") {
|
||||
expr = strings.TrimSpace(expr)
|
||||
if expr != "" {
|
||||
if g, err := glob.Compile(expr, '.', '/'); err != nil {
|
||||
log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
|
||||
} else {
|
||||
extarr = append(extarr, g)
|
||||
}
|
||||
}
|
||||
}
|
||||
return extarr
|
||||
}
|
||||
|
||||
// MergeBlockedByProtectedFiles returns true if merge is blocked by protected files change
|
||||
func (protectBranch *ProtectedBranch) MergeBlockedByProtectedFiles(changedProtectedFiles []string) bool {
|
||||
glob := protectBranch.GetProtectedFilePatterns()
|
||||
if len(glob) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return len(changedProtectedFiles) > 0
|
||||
}
|
||||
|
||||
// IsProtectedFile returns whether the path is protected
|
||||
func (protectBranch *ProtectedBranch) IsProtectedFile(patterns []glob.Glob, path string) bool {
|
||||
if len(patterns) == 0 {
|
||||
patterns = protectBranch.GetProtectedFilePatterns()
|
||||
if len(patterns) == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
lpath := strings.ToLower(strings.TrimSpace(path))
|
||||
|
||||
r := false
|
||||
for _, pat := range patterns {
|
||||
if pat.Match(lpath) {
|
||||
r = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// IsUnprotectedFile returns whether the path is unprotected
|
||||
func (protectBranch *ProtectedBranch) IsUnprotectedFile(patterns []glob.Glob, path string) bool {
|
||||
if len(patterns) == 0 {
|
||||
patterns = protectBranch.GetUnprotectedFilePatterns()
|
||||
if len(patterns) == 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
lpath := strings.ToLower(strings.TrimSpace(path))
|
||||
|
||||
r := false
|
||||
for _, pat := range patterns {
|
||||
if pat.Match(lpath) {
|
||||
r = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// GetProtectedBranchBy gets a protected branch by repository ID and branch name
|
||||
func GetProtectedBranchBy(ctx context.Context, repoID int64, branchName string) (*ProtectedBranch, error) {
|
||||
rel := &ProtectedBranch{RepoID: repoID, BranchName: branchName}
|
||||
has, err := db.GetByBean(ctx, rel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !has {
|
||||
return nil, nil
|
||||
}
|
||||
return rel, nil
|
||||
}
|
||||
|
||||
// WhitelistOptions represent all sorts of whitelists used for protected branches
|
||||
type WhitelistOptions struct {
|
||||
UserIDs []int64
|
||||
TeamIDs []int64
|
||||
|
||||
MergeUserIDs []int64
|
||||
MergeTeamIDs []int64
|
||||
|
||||
ApprovalsUserIDs []int64
|
||||
ApprovalsTeamIDs []int64
|
||||
}
|
||||
|
||||
// UpdateProtectBranch saves branch protection options of repository.
|
||||
// If ID is 0, it creates a new record. Otherwise, updates existing record.
|
||||
// This function also checks whether the whitelisted user and team IDs have changed,
// to avoid unnecessarily deleting and regenerating the whitelists.
|
||||
func UpdateProtectBranch(ctx context.Context, repo *repo_model.Repository, protectBranch *ProtectedBranch, opts WhitelistOptions) (err error) {
|
||||
if err = repo.GetOwner(ctx); err != nil {
|
||||
return fmt.Errorf("GetOwner: %v", err)
|
||||
}
|
||||
|
||||
whitelist, err := updateUserWhitelist(ctx, repo, protectBranch.WhitelistUserIDs, opts.UserIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protectBranch.WhitelistUserIDs = whitelist
|
||||
|
||||
whitelist, err = updateUserWhitelist(ctx, repo, protectBranch.MergeWhitelistUserIDs, opts.MergeUserIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protectBranch.MergeWhitelistUserIDs = whitelist
|
||||
|
||||
whitelist, err = updateApprovalWhitelist(ctx, repo, protectBranch.ApprovalsWhitelistUserIDs, opts.ApprovalsUserIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protectBranch.ApprovalsWhitelistUserIDs = whitelist
|
||||
|
||||
// if the repo is in an organization
|
||||
whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.WhitelistTeamIDs, opts.TeamIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protectBranch.WhitelistTeamIDs = whitelist
|
||||
|
||||
whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.MergeWhitelistTeamIDs, opts.MergeTeamIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protectBranch.MergeWhitelistTeamIDs = whitelist
|
||||
|
||||
whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.ApprovalsWhitelistTeamIDs, opts.ApprovalsTeamIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
protectBranch.ApprovalsWhitelistTeamIDs = whitelist
|
||||
|
||||
// Make sure protectBranch.ID is not 0 for whitelists
|
||||
if protectBranch.ID == 0 {
|
||||
if _, err = db.GetEngine(ctx).Insert(protectBranch); err != nil {
|
||||
return fmt.Errorf("Insert: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err = db.GetEngine(ctx).ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {
|
||||
return fmt.Errorf("Update: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetProtectedBranches gets all protected branches of the repository
|
||||
func GetProtectedBranches(repoID int64) ([]*ProtectedBranch, error) {
|
||||
protectedBranches := make([]*ProtectedBranch, 0)
|
||||
return protectedBranches, db.GetEngine(db.DefaultContext).Find(&protectedBranches, &ProtectedBranch{RepoID: repoID})
|
||||
}
|
||||
|
||||
// IsProtectedBranch checks if branch is protected
|
||||
func IsProtectedBranch(repoID int64, branchName string) (bool, error) {
|
||||
protectedBranch := &ProtectedBranch{
|
||||
RepoID: repoID,
|
||||
BranchName: branchName,
|
||||
}
|
||||
|
||||
has, err := db.GetEngine(db.DefaultContext).Exist(protectedBranch)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
return has, nil
|
||||
}
|
||||
|
||||
// updateApprovalWhitelist checks whether the user whitelist changed and returns a whitelist with
|
||||
// the users from newWhitelist which have explicit read or write access to the repo.
|
||||
func updateApprovalWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
|
||||
hasUsersChanged := !util.IsSliceInt64Eq(currentWhitelist, newWhitelist)
|
||||
if !hasUsersChanged {
|
||||
return currentWhitelist, nil
|
||||
}
|
||||
|
||||
whitelist = make([]int64, 0, len(newWhitelist))
|
||||
for _, userID := range newWhitelist {
|
||||
if reader, err := access_model.IsRepoReader(ctx, repo, userID); err != nil {
|
||||
return nil, err
|
||||
} else if !reader {
|
||||
continue
|
||||
}
|
||||
whitelist = append(whitelist, userID)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// updateUserWhitelist checks whether the user whitelist changed and returns a whitelist with
|
||||
// the users from newWhitelist which have write access to the repo.
|
||||
func updateUserWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
|
||||
hasUsersChanged := !util.IsSliceInt64Eq(currentWhitelist, newWhitelist)
|
||||
if !hasUsersChanged {
|
||||
return currentWhitelist, nil
|
||||
}
|
||||
|
||||
whitelist = make([]int64, 0, len(newWhitelist))
|
||||
for _, userID := range newWhitelist {
|
||||
user, err := user_model.GetUserByIDCtx(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetUserByID [user_id: %d, repo_id: %d]: %v", userID, repo.ID, err)
|
||||
}
|
||||
perm, err := access_model.GetUserRepoPermission(ctx, repo, user)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetUserRepoPermission [user_id: %d, repo_id: %d]: %v", userID, repo.ID, err)
|
||||
}
|
||||
|
||||
if !perm.CanWrite(unit.TypeCode) {
|
||||
continue // Drop invalid user ID
|
||||
}
|
||||
|
||||
whitelist = append(whitelist, userID)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// updateTeamWhitelist checks whether the team whitelist changed and returns a whitelist with
|
||||
// the teams from newWhitelist which have write access to the repo.
|
||||
func updateTeamWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
|
||||
hasTeamsChanged := !util.IsSliceInt64Eq(currentWhitelist, newWhitelist)
|
||||
if !hasTeamsChanged {
|
||||
return currentWhitelist, nil
|
||||
}
|
||||
|
||||
teams, err := organization.GetTeamsWithAccessToRepo(ctx, repo.OwnerID, repo.ID, perm.AccessModeRead)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("GetTeamsWithAccessToRepo [org_id: %d, repo_id: %d]: %v", repo.OwnerID, repo.ID, err)
|
||||
}
|
||||
|
||||
whitelist = make([]int64, 0, len(teams))
|
||||
for i := range teams {
|
||||
if util.IsInt64InSlice(teams[i].ID, newWhitelist) {
|
||||
whitelist = append(whitelist, teams[i].ID)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteProtectedBranch removes a branch protection rule from the repository.
|
||||
func DeleteProtectedBranch(repoID, id int64) (err error) {
|
||||
protectedBranch := &ProtectedBranch{
|
||||
RepoID: repoID,
|
||||
ID: id,
|
||||
}
|
||||
|
||||
if affected, err := db.GetEngine(db.DefaultContext).Delete(protectedBranch); err != nil {
|
||||
return err
|
||||
} else if affected != 1 {
|
||||
return fmt.Errorf("delete protected branch ID(%v) failed", id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeletedBranch struct
|
||||
type DeletedBranch struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
RepoID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
|
||||
Name string `xorm:"UNIQUE(s) NOT NULL"`
|
||||
Commit string `xorm:"UNIQUE(s) NOT NULL"`
|
||||
DeletedByID int64 `xorm:"INDEX"`
|
||||
DeletedBy *user_model.User `xorm:"-"`
|
||||
DeletedUnix timeutil.TimeStamp `xorm:"INDEX created"`
|
||||
}
|
||||
|
||||
// AddDeletedBranch adds a deleted branch to the database
|
||||
func AddDeletedBranch(repoID int64, branchName, commit string, deletedByID int64) error {
|
||||
deletedBranch := &DeletedBranch{
|
||||
RepoID: repoID,
|
||||
Name: branchName,
|
||||
Commit: commit,
|
||||
DeletedByID: deletedByID,
|
||||
}
|
||||
|
||||
_, err := db.GetEngine(db.DefaultContext).Insert(deletedBranch)
|
||||
return err
|
||||
}
|
||||
|
||||
// GetDeletedBranches returns all the deleted branches
|
||||
func GetDeletedBranches(repoID int64) ([]*DeletedBranch, error) {
|
||||
deletedBranches := make([]*DeletedBranch, 0)
|
||||
return deletedBranches, db.GetEngine(db.DefaultContext).Where("repo_id = ?", repoID).Desc("deleted_unix").Find(&deletedBranches)
|
||||
}
|
||||
|
||||
// GetDeletedBranchByID gets a deleted branch by its ID
|
||||
func GetDeletedBranchByID(repoID, id int64) (*DeletedBranch, error) {
|
||||
deletedBranch := &DeletedBranch{}
|
||||
has, err := db.GetEngine(db.DefaultContext).Where("repo_id = ?", repoID).And("id = ?", id).Get(deletedBranch)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !has {
|
||||
return nil, nil
|
||||
}
|
||||
return deletedBranch, nil
|
||||
}
|
||||
|
||||
// RemoveDeletedBranchByID removes a deleted branch from the database
|
||||
func RemoveDeletedBranchByID(repoID, id int64) (err error) {
|
||||
deletedBranch := &DeletedBranch{
|
||||
RepoID: repoID,
|
||||
ID: id,
|
||||
}
|
||||
|
||||
if affected, err := db.GetEngine(db.DefaultContext).Delete(deletedBranch); err != nil {
|
||||
return err
|
||||
} else if affected != 1 {
|
||||
return fmt.Errorf("remove deleted branch ID(%v) failed", id)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadUser loads the user that deleted the branch
|
||||
// When no user is found, it falls back to user_model.NewGhostUser()
|
||||
func (deletedBranch *DeletedBranch) LoadUser() {
|
||||
user, err := user_model.GetUserByID(deletedBranch.DeletedByID)
|
||||
if err != nil {
|
||||
user = user_model.NewGhostUser()
|
||||
}
|
||||
deletedBranch.DeletedBy = user
|
||||
}
|
||||
|
||||
// RemoveDeletedBranchByName removes all deleted-branch records with the given name from the repository
|
||||
func RemoveDeletedBranchByName(repoID int64, branch string) error {
|
||||
_, err := db.GetEngine(db.DefaultContext).Where("repo_id=? AND name=?", repoID, branch).Delete(new(DeletedBranch))
|
||||
return err
|
||||
}
|
||||
|
||||
// RemoveOldDeletedBranches removes old deleted branches
|
||||
func RemoveOldDeletedBranches(ctx context.Context, olderThan time.Duration) {
|
||||
// Nothing to do for shutdown or terminate
|
||||
log.Trace("Doing: DeletedBranchesCleanup")
|
||||
|
||||
deleteBefore := time.Now().Add(-olderThan)
|
||||
_, err := db.GetEngine(db.DefaultContext).Where("deleted_unix < ?", deleteBefore.Unix()).Delete(new(DeletedBranch))
|
||||
if err != nil {
|
||||
log.Error("DeletedBranchesCleanup: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// RenamedBranch provides a log of renamed branches;
// it is consulted when a branch can't be found.
|
||||
type RenamedBranch struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
RepoID int64 `xorm:"INDEX NOT NULL"`
|
||||
From string
|
||||
To string
|
||||
CreatedUnix timeutil.TimeStamp `xorm:"created"`
|
||||
}
|
||||
|
||||
// FindRenamedBranch checks whether a branch was renamed
|
||||
func FindRenamedBranch(repoID int64, from string) (branch *RenamedBranch, exist bool, err error) {
|
||||
branch = &RenamedBranch{
|
||||
RepoID: repoID,
|
||||
From: from,
|
||||
}
|
||||
exist, err = db.GetEngine(db.DefaultContext).Get(branch)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// RenameBranch renames a branch
|
||||
func RenameBranch(repo *repo_model.Repository, from, to string, gitAction func(isDefault bool) error) (err error) {
|
||||
ctx, committer, err := db.TxContext()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer committer.Close()
|
||||
|
||||
sess := db.GetEngine(ctx)
|
||||
// 1. update default branch if needed
|
||||
isDefault := repo.DefaultBranch == from
|
||||
if isDefault {
|
||||
repo.DefaultBranch = to
|
||||
_, err = sess.ID(repo.ID).Cols("default_branch").Update(repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Update protected branch if needed
|
||||
protectedBranch, err := GetProtectedBranchBy(ctx, repo.ID, from)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if protectedBranch != nil {
|
||||
protectedBranch.BranchName = to
|
||||
_, err = sess.ID(protectedBranch.ID).Cols("branch_name").Update(protectedBranch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Update all not merged pull request base branch name
|
||||
_, err = sess.Table("pull_request").Where("base_repo_id=? AND base_branch=? AND has_merged=?",
|
||||
repo.ID, from, false).
|
||||
Update(map[string]interface{}{"base_branch": to})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 4. do git action
|
||||
if err = gitAction(isDefault); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 5. insert renamed branch record
|
||||
renamedBranch := &RenamedBranch{
|
||||
RepoID: repo.ID,
|
||||
From: from,
|
||||
To: to,
|
||||
}
|
||||
err = db.Insert(ctx, renamedBranch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return committer.Commit()
|
||||
}
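A short usage sketch for the protected-branch API defined above; the helper itself is hypothetical and error handling is kept minimal:

package example

import (
	"context"
	"fmt"

	git_model "code.gitea.io/gitea/models/git"
)

// checkPush is illustrative only: it rejects a push to a protected branch
// when the pusher is not allowed by the branch's whitelist settings.
func checkPush(ctx context.Context, repoID, userID int64, branchName string) error {
	protectBranch, err := git_model.GetProtectedBranchBy(ctx, repoID, branchName)
	if err != nil {
		return err
	}
	if protectBranch == nil {
		return nil // branch is not protected, push is allowed
	}
	if !protectBranch.CanUserPush(userID) {
		return fmt.Errorf("user %d may not push to protected branch %q", userID, branchName)
	}
	return nil
}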
163 models/git/branches_test.go Normal file
@@ -0,0 +1,163 @@
// Copyright 2017 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package git_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models"
|
||||
"code.gitea.io/gitea/models/db"
|
||||
git_model "code.gitea.io/gitea/models/git"
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
"code.gitea.io/gitea/models/unittest"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAddDeletedBranch(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1}).(*git_model.DeletedBranch)
|
||||
|
||||
assert.Error(t, git_model.AddDeletedBranch(repo.ID, firstBranch.Name, firstBranch.Commit, firstBranch.DeletedByID))
|
||||
assert.NoError(t, git_model.AddDeletedBranch(repo.ID, "test", "5655464564554545466464656", int64(1)))
|
||||
}
|
||||
|
||||
func TestGetDeletedBranches(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
|
||||
branches, err := git_model.GetDeletedBranches(repo.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, branches, 2)
|
||||
}
|
||||
|
||||
func TestGetDeletedBranch(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1}).(*git_model.DeletedBranch)
|
||||
|
||||
assert.NotNil(t, getDeletedBranch(t, firstBranch))
|
||||
}
|
||||
|
||||
func TestDeletedBranchLoadUser(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
|
||||
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1}).(*git_model.DeletedBranch)
|
||||
secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 2}).(*git_model.DeletedBranch)
|
||||
|
||||
branch := getDeletedBranch(t, firstBranch)
|
||||
assert.Nil(t, branch.DeletedBy)
|
||||
branch.LoadUser()
|
||||
assert.NotNil(t, branch.DeletedBy)
|
||||
assert.Equal(t, "user1", branch.DeletedBy.Name)
|
||||
|
||||
branch = getDeletedBranch(t, secondBranch)
|
||||
assert.Nil(t, branch.DeletedBy)
|
||||
branch.LoadUser()
|
||||
assert.NotNil(t, branch.DeletedBy)
|
||||
assert.Equal(t, "Ghost", branch.DeletedBy.Name)
|
||||
}
|
||||
|
||||
func TestRemoveDeletedBranch(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
|
||||
firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 1}).(*git_model.DeletedBranch)
|
||||
|
||||
err := git_model.RemoveDeletedBranchByID(repo.ID, 1)
|
||||
assert.NoError(t, err)
|
||||
unittest.AssertNotExistsBean(t, firstBranch)
|
||||
unittest.AssertExistsAndLoadBean(t, &git_model.DeletedBranch{ID: 2})
|
||||
}
|
||||
|
||||
func getDeletedBranch(t *testing.T, branch *git_model.DeletedBranch) *git_model.DeletedBranch {
|
||||
repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
|
||||
deletedBranch, err := git_model.GetDeletedBranchByID(repo.ID, branch.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, branch.ID, deletedBranch.ID)
|
||||
assert.Equal(t, branch.Name, deletedBranch.Name)
|
||||
assert.Equal(t, branch.Commit, deletedBranch.Commit)
|
||||
assert.Equal(t, branch.DeletedByID, deletedBranch.DeletedByID)
|
||||
|
||||
return deletedBranch
|
||||
}
|
||||
|
||||
func TestFindRenamedBranch(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
branch, exist, err := git_model.FindRenamedBranch(1, "dev")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, exist)
|
||||
assert.Equal(t, "master", branch.To)
|
||||
|
||||
_, exist, err = git_model.FindRenamedBranch(1, "unknow")
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, false, exist)
|
||||
}
|
||||
|
||||
func TestRenameBranch(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
_isDefault := false
|
||||
|
||||
ctx, committer, err := db.TxContext()
|
||||
defer committer.Close()
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, git_model.UpdateProtectBranch(ctx, repo1, &git_model.ProtectedBranch{
|
||||
RepoID: repo1.ID,
|
||||
BranchName: "master",
|
||||
}, git_model.WhitelistOptions{}))
|
||||
assert.NoError(t, committer.Commit())
|
||||
|
||||
assert.NoError(t, git_model.RenameBranch(repo1, "master", "main", func(isDefault bool) error {
|
||||
_isDefault = isDefault
|
||||
return nil
|
||||
}))
|
||||
|
||||
assert.Equal(t, true, _isDefault)
|
||||
repo1 = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
assert.Equal(t, "main", repo1.DefaultBranch)
|
||||
|
||||
pull := unittest.AssertExistsAndLoadBean(t, &models.PullRequest{ID: 1}).(*models.PullRequest) // merged
|
||||
assert.Equal(t, "master", pull.BaseBranch)
|
||||
|
||||
pull = unittest.AssertExistsAndLoadBean(t, &models.PullRequest{ID: 2}).(*models.PullRequest) // open
|
||||
assert.Equal(t, "main", pull.BaseBranch)
|
||||
|
||||
renamedBranch := unittest.AssertExistsAndLoadBean(t, &git_model.RenamedBranch{ID: 2}).(*git_model.RenamedBranch)
|
||||
assert.Equal(t, "master", renamedBranch.From)
|
||||
assert.Equal(t, "main", renamedBranch.To)
|
||||
assert.Equal(t, int64(1), renamedBranch.RepoID)
|
||||
|
||||
unittest.AssertExistsAndLoadBean(t, &git_model.ProtectedBranch{
|
||||
RepoID: repo1.ID,
|
||||
BranchName: "main",
|
||||
})
|
||||
}
|
||||
|
||||
func TestOnlyGetDeletedBranchOnCorrectRepo(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
|
||||
// Get deletedBranch with ID of 1 on repo with ID 2.
|
||||
// This should return a nil branch as this deleted branch
|
||||
// is actually on repo with ID 1.
|
||||
repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2}).(*repo_model.Repository)
|
||||
|
||||
deletedBranch, err := git_model.GetDeletedBranchByID(repo2.ID, 1)
|
||||
|
||||
// Expect no error, and the returned branch is nil.
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, deletedBranch)
|
||||
|
||||
// Now get the deletedBranch with ID of 1 on repo with ID 1.
|
||||
// This should return the deletedBranch.
|
||||
repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
|
||||
deletedBranch, err = git_model.GetDeletedBranchByID(repo1.ID, 1)
|
||||
|
||||
// Expect no error, and the returned branch to be not nil.
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, deletedBranch)
|
||||
}
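The RenamedBranch log tested above is meant to be consulted when a branch cannot be found; a hedged sketch of that lookup (the resolver function is hypothetical):

package example

import (
	git_model "code.gitea.io/gitea/models/git"
)

// resolveBranch is illustrative only: when a branch is missing, consult the
// renamed-branch log and return the recorded new name if a rename exists.
func resolveBranch(repoID int64, name string) (string, error) {
	renamed, exist, err := git_model.FindRenamedBranch(repoID, name)
	if err != nil {
		return "", err
	}
	if exist {
		return renamed.To, nil
	}
	return name, nil
}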
370 models/git/commit_status.go Normal file
@@ -0,0 +1,370 @@
// Copyright 2017 Gitea. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package git
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
asymkey_model "code.gitea.io/gitea/models/asymkey"
|
||||
"code.gitea.io/gitea/models/db"
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
user_model "code.gitea.io/gitea/models/user"
|
||||
"code.gitea.io/gitea/modules/git"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/setting"
|
||||
api "code.gitea.io/gitea/modules/structs"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
"xorm.io/xorm"
|
||||
)
|
||||
|
||||
// CommitStatus holds a single Status of a single Commit
|
||||
type CommitStatus struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
Index int64 `xorm:"INDEX UNIQUE(repo_sha_index)"`
|
||||
RepoID int64 `xorm:"INDEX UNIQUE(repo_sha_index)"`
|
||||
Repo *repo_model.Repository `xorm:"-"`
|
||||
State api.CommitStatusState `xorm:"VARCHAR(7) NOT NULL"`
|
||||
SHA string `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_sha_index)"`
|
||||
TargetURL string `xorm:"TEXT"`
|
||||
Description string `xorm:"TEXT"`
|
||||
ContextHash string `xorm:"char(40) index"`
|
||||
Context string `xorm:"TEXT"`
|
||||
Creator *user_model.User `xorm:"-"`
|
||||
CreatorID int64
|
||||
|
||||
CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
|
||||
UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
db.RegisterModel(new(CommitStatus))
|
||||
db.RegisterModel(new(CommitStatusIndex))
|
||||
}
|
||||
|
||||
// upsertCommitStatusIndex will not return until it acquires the lock or receives an error.
|
||||
func upsertCommitStatusIndex(ctx context.Context, repoID int64, sha string) (err error) {
|
||||
// An atomic UPSERT operation (INSERT/UPDATE) is the only operation
|
||||
// that ensures that the key is actually locked.
|
||||
switch {
|
||||
case setting.Database.UseSQLite3 || setting.Database.UsePostgreSQL:
|
||||
_, err = db.Exec(ctx, "INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
|
||||
"VALUES (?,?,1) ON CONFLICT (repo_id,sha) DO UPDATE SET max_index = `commit_status_index`.max_index+1",
|
||||
repoID, sha)
|
||||
case setting.Database.UseMySQL:
|
||||
_, err = db.Exec(ctx, "INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
|
||||
"VALUES (?,?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1",
|
||||
repoID, sha)
|
||||
case setting.Database.UseMSSQL:
|
||||
// https://weblogs.sqlteam.com/dang/2009/01/31/upsert-race-condition-with-merge/
|
||||
_, err = db.Exec(ctx, "MERGE `commit_status_index` WITH (HOLDLOCK) as target "+
|
||||
"USING (SELECT ? AS repo_id, ? AS sha) AS src "+
|
||||
"ON src.repo_id = target.repo_id AND src.sha = target.sha "+
|
||||
"WHEN MATCHED THEN UPDATE SET target.max_index = target.max_index+1 "+
|
||||
"WHEN NOT MATCHED THEN INSERT (repo_id, sha, max_index) "+
|
||||
"VALUES (src.repo_id, src.sha, 1);",
|
||||
repoID, sha)
|
||||
default:
|
||||
return fmt.Errorf("database type not supported")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// GetNextCommitStatusIndex retries several times to generate the next commit status index
|
||||
func GetNextCommitStatusIndex(repoID int64, sha string) (int64, error) {
|
||||
for i := 0; i < db.MaxDupIndexAttempts; i++ {
|
||||
idx, err := getNextCommitStatusIndex(repoID, sha)
|
||||
if err == db.ErrResouceOutdated {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return idx, nil
|
||||
}
|
||||
return 0, db.ErrGetResourceIndexFailed
|
||||
}
|
||||
|
||||
// getNextCommitStatusIndex returns the next index
|
||||
func getNextCommitStatusIndex(repoID int64, sha string) (int64, error) {
|
||||
ctx, commiter, err := db.TxContext()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer commiter.Close()
|
||||
|
||||
var preIdx int64
|
||||
_, err = db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ?", repoID, sha).Get(&preIdx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := upsertCommitStatusIndex(ctx, repoID, sha); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var curIdx int64
|
||||
has, err := db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ? AND max_index=?", repoID, sha, preIdx+1).Get(&curIdx)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if !has {
|
||||
return 0, db.ErrResouceOutdated
|
||||
}
|
||||
if err := commiter.Commit(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return curIdx, nil
|
||||
}
|
||||
|
||||
func (status *CommitStatus) loadAttributes(ctx context.Context) (err error) {
|
||||
if status.Repo == nil {
|
||||
status.Repo, err = repo_model.GetRepositoryByIDCtx(ctx, status.RepoID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getRepositoryByID [%d]: %v", status.RepoID, err)
|
||||
}
|
||||
}
|
||||
if status.Creator == nil && status.CreatorID > 0 {
|
||||
status.Creator, err = user_model.GetUserByIDCtx(ctx, status.CreatorID)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getUserByID [%d]: %v", status.CreatorID, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// APIURL returns the absolute APIURL to this commit-status.
|
||||
func (status *CommitStatus) APIURL() string {
|
||||
_ = status.loadAttributes(db.DefaultContext)
|
||||
return status.Repo.APIURL() + "/statuses/" + url.PathEscape(status.SHA)
|
||||
}
|
||||
|
||||
// CalcCommitStatus returns the overall commit status derived from the given statuses; the statuses should be ordered by ID descending
|
||||
func CalcCommitStatus(statuses []*CommitStatus) *CommitStatus {
|
||||
var lastStatus *CommitStatus
|
||||
var state api.CommitStatusState
|
||||
for _, status := range statuses {
|
||||
if status.State.NoBetterThan(state) {
|
||||
state = status.State
|
||||
lastStatus = status
|
||||
}
|
||||
}
|
||||
if lastStatus == nil {
|
||||
if len(statuses) > 0 {
|
||||
lastStatus = statuses[0]
|
||||
} else {
|
||||
lastStatus = &CommitStatus{}
|
||||
}
|
||||
}
|
||||
return lastStatus
|
||||
}
|
||||
|
||||
// CommitStatusOptions holds the options for querying commit statuses
|
||||
type CommitStatusOptions struct {
|
||||
db.ListOptions
|
||||
State string
|
||||
SortType string
|
||||
}
|
||||
|
||||
// GetCommitStatuses returns all statuses for a given commit.
|
||||
func GetCommitStatuses(repo *repo_model.Repository, sha string, opts *CommitStatusOptions) ([]*CommitStatus, int64, error) {
|
||||
if opts.Page <= 0 {
|
||||
opts.Page = 1
|
||||
}
|
||||
if opts.PageSize <= 0 {
|
||||
opts.PageSize = setting.ItemsPerPage
|
||||
}
|
||||
|
||||
countSession := listCommitStatusesStatement(repo, sha, opts)
|
||||
countSession = db.SetSessionPagination(countSession, opts)
|
||||
maxResults, err := countSession.Count(new(CommitStatus))
|
||||
if err != nil {
|
||||
log.Error("Count PRs: %v", err)
|
||||
return nil, maxResults, err
|
||||
}
|
||||
|
||||
statuses := make([]*CommitStatus, 0, opts.PageSize)
|
||||
findSession := listCommitStatusesStatement(repo, sha, opts)
|
||||
findSession = db.SetSessionPagination(findSession, opts)
|
||||
sortCommitStatusesSession(findSession, opts.SortType)
|
||||
return statuses, maxResults, findSession.Find(&statuses)
|
||||
}
|
||||
|
||||
func listCommitStatusesStatement(repo *repo_model.Repository, sha string, opts *CommitStatusOptions) *xorm.Session {
|
||||
sess := db.GetEngine(db.DefaultContext).Where("repo_id = ?", repo.ID).And("sha = ?", sha)
|
||||
switch opts.State {
|
||||
case "pending", "success", "error", "failure", "warning":
|
||||
sess.And("state = ?", opts.State)
|
||||
}
|
||||
return sess
|
||||
}
|
||||
|
||||
func sortCommitStatusesSession(sess *xorm.Session, sortType string) {
|
||||
switch sortType {
|
||||
case "oldest":
|
||||
sess.Asc("created_unix")
|
||||
case "recentupdate":
|
||||
sess.Desc("updated_unix")
|
||||
case "leastupdate":
|
||||
sess.Asc("updated_unix")
|
||||
case "leastindex":
|
||||
sess.Desc("index")
|
||||
case "highestindex":
|
||||
sess.Asc("index")
|
||||
default:
|
||||
sess.Desc("created_unix")
|
||||
}
|
||||
}
|
||||
|
||||
// CommitStatusIndex represents a table for commit status index
|
||||
type CommitStatusIndex struct {
|
||||
ID int64
|
||||
RepoID int64 `xorm:"unique(repo_sha)"`
|
||||
SHA string `xorm:"unique(repo_sha)"`
|
||||
MaxIndex int64 `xorm:"index"`
|
||||
}
|
||||
|
||||
// GetLatestCommitStatus returns all statuses with a unique context for a given commit.
|
||||
func GetLatestCommitStatus(ctx context.Context, repoID int64, sha string, listOptions db.ListOptions) ([]*CommitStatus, int64, error) {
|
||||
ids := make([]int64, 0, 10)
|
||||
sess := db.GetEngine(ctx).Table(&CommitStatus{}).
|
||||
Where("repo_id = ?", repoID).And("sha = ?", sha).
|
||||
Select("max( id ) as id").
|
||||
GroupBy("context_hash").OrderBy("max( id ) desc")
|
||||
|
||||
sess = db.SetSessionPagination(sess, &listOptions)
|
||||
|
||||
count, err := sess.FindAndCount(&ids)
|
||||
if err != nil {
|
||||
return nil, count, err
|
||||
}
|
||||
statuses := make([]*CommitStatus, 0, len(ids))
|
||||
if len(ids) == 0 {
|
||||
return statuses, count, nil
|
||||
}
|
||||
return statuses, count, db.GetEngine(ctx).In("id", ids).Find(&statuses)
|
||||
}
|
||||
|
||||
// FindRepoRecentCommitStatusContexts returns repository's recent commit status contexts
|
||||
func FindRepoRecentCommitStatusContexts(repoID int64, before time.Duration) ([]string, error) {
|
||||
start := timeutil.TimeStampNow().AddDuration(-before)
|
||||
ids := make([]int64, 0, 10)
|
||||
if err := db.GetEngine(db.DefaultContext).Table("commit_status").
|
||||
Where("repo_id = ?", repoID).
|
||||
And("updated_unix >= ?", start).
|
||||
Select("max( id ) as id").
|
||||
GroupBy("context_hash").OrderBy("max( id ) desc").
|
||||
Find(&ids); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
contexts := make([]string, 0, len(ids))
|
||||
if len(ids) == 0 {
|
||||
return contexts, nil
|
||||
}
|
||||
return contexts, db.GetEngine(db.DefaultContext).Select("context").Table("commit_status").In("id", ids).Find(&contexts)
|
||||
}
|
||||
|
||||
// NewCommitStatusOptions holds options for creating a CommitStatus
|
||||
type NewCommitStatusOptions struct {
|
||||
Repo *repo_model.Repository
|
||||
Creator *user_model.User
|
||||
SHA string
|
||||
CommitStatus *CommitStatus
|
||||
}
|
||||
|
||||
// NewCommitStatus saves a commit status into the database
|
||||
func NewCommitStatus(opts NewCommitStatusOptions) error {
|
||||
if opts.Repo == nil {
|
||||
return fmt.Errorf("NewCommitStatus[nil, %s]: no repository specified", opts.SHA)
|
||||
}
|
||||
|
||||
repoPath := opts.Repo.RepoPath()
|
||||
if opts.Creator == nil {
|
||||
return fmt.Errorf("NewCommitStatus[%s, %s]: no user specified", repoPath, opts.SHA)
|
||||
}
|
||||
|
||||
// Get the next Status Index
|
||||
idx, err := GetNextCommitStatusIndex(opts.Repo.ID, opts.SHA)
|
||||
if err != nil {
|
||||
return fmt.Errorf("generate commit status index failed: %v", err)
|
||||
}
|
||||
|
||||
ctx, committer, err := db.TxContext()
|
||||
if err != nil {
|
||||
return fmt.Errorf("NewCommitStatus[repo_id: %d, user_id: %d, sha: %s]: %v", opts.Repo.ID, opts.Creator.ID, opts.SHA, err)
|
||||
}
|
||||
defer committer.Close()
|
||||
|
||||
opts.CommitStatus.Description = strings.TrimSpace(opts.CommitStatus.Description)
|
||||
opts.CommitStatus.Context = strings.TrimSpace(opts.CommitStatus.Context)
|
||||
opts.CommitStatus.TargetURL = strings.TrimSpace(opts.CommitStatus.TargetURL)
|
||||
opts.CommitStatus.SHA = opts.SHA
|
||||
opts.CommitStatus.CreatorID = opts.Creator.ID
|
||||
opts.CommitStatus.RepoID = opts.Repo.ID
|
||||
opts.CommitStatus.Index = idx
|
||||
log.Debug("NewCommitStatus[%s, %s]: %d", repoPath, opts.SHA, opts.CommitStatus.Index)
|
||||
|
||||
opts.CommitStatus.ContextHash = hashCommitStatusContext(opts.CommitStatus.Context)
|
||||
|
||||
// Insert new CommitStatus
|
||||
if _, err = db.GetEngine(ctx).Insert(opts.CommitStatus); err != nil {
|
||||
return fmt.Errorf("Insert CommitStatus[%s, %s]: %v", repoPath, opts.SHA, err)
|
||||
}
|
||||
|
||||
return committer.Commit()
|
||||
}
|
||||
|
||||
// SignCommitWithStatuses represents a commit with validation of signature and status state.
|
||||
type SignCommitWithStatuses struct {
|
||||
Status *CommitStatus
|
||||
Statuses []*CommitStatus
|
||||
*asymkey_model.SignCommit
|
||||
}
|
||||
|
||||
// ParseCommitsWithStatus checks each commit's latest statuses and calculates its combined status state
|
||||
func ParseCommitsWithStatus(oldCommits []*asymkey_model.SignCommit, repo *repo_model.Repository) []*SignCommitWithStatuses {
|
||||
newCommits := make([]*SignCommitWithStatuses, 0, len(oldCommits))
|
||||
|
||||
for _, c := range oldCommits {
|
||||
commit := &SignCommitWithStatuses{
|
||||
SignCommit: c,
|
||||
}
|
||||
statuses, _, err := GetLatestCommitStatus(db.DefaultContext, repo.ID, commit.ID.String(), db.ListOptions{})
|
||||
if err != nil {
|
||||
log.Error("GetLatestCommitStatus: %v", err)
|
||||
} else {
|
||||
commit.Statuses = statuses
|
||||
commit.Status = CalcCommitStatus(statuses)
|
||||
}
|
||||
|
||||
newCommits = append(newCommits, commit)
|
||||
}
|
||||
return newCommits
|
||||
}
|
||||
|
||||
// hashCommitStatusContext hashes the context string
|
||||
func hashCommitStatusContext(context string) string {
|
||||
return fmt.Sprintf("%x", sha1.Sum([]byte(context)))
|
||||
}
|
||||
|
||||
// ConvertFromGitCommit converts git commits into SignCommitWithStatuses
|
||||
func ConvertFromGitCommit(commits []*git.Commit, repo *repo_model.Repository) []*SignCommitWithStatuses {
|
||||
return ParseCommitsWithStatus(
|
||||
asymkey_model.ParseCommitsWithSignature(
|
||||
user_model.ValidateCommitsWithEmails(commits),
|
||||
repo.GetTrustModel(),
|
||||
func(user *user_model.User) (bool, error) {
|
||||
return repo_model.IsOwnerMemberCollaborator(repo, user.ID)
|
||||
},
|
||||
),
|
||||
repo,
|
||||
)
|
||||
}
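A minimal sketch of recording a status for a commit with NewCommitStatus above; the context, URL and description values are placeholders and the helper is hypothetical:

package example

import (
	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	api "code.gitea.io/gitea/modules/structs"
)

// reportStatus is an illustrative helper: it stores one CI status for a commit.
func reportStatus(repo *repo_model.Repository, doer *user_model.User, sha string) error {
	return git_model.NewCommitStatus(git_model.NewCommitStatusOptions{
		Repo:    repo,
		Creator: doer,
		SHA:     sha,
		CommitStatus: &git_model.CommitStatus{
			State:       api.CommitStatusSuccess,
			TargetURL:   "https://ci.example.com/build/1", // placeholder
			Description: "build succeeded",
			Context:     "ci/example",
		},
	})
}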
50 models/git/commit_status_test.go Normal file
@@ -0,0 +1,50 @@
// Copyright 2017 Gitea. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package git_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"code.gitea.io/gitea/models/db"
|
||||
git_model "code.gitea.io/gitea/models/git"
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
"code.gitea.io/gitea/models/unittest"
|
||||
"code.gitea.io/gitea/modules/structs"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGetCommitStatuses(t *testing.T) {
|
||||
assert.NoError(t, unittest.PrepareTestDatabase())
|
||||
|
||||
repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository)
|
||||
|
||||
sha1 := "1234123412341234123412341234123412341234"
|
||||
|
||||
statuses, maxResults, err := git_model.GetCommitStatuses(repo1, sha1, &git_model.CommitStatusOptions{ListOptions: db.ListOptions{Page: 1, PageSize: 50}})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int(maxResults), 5)
|
||||
assert.Len(t, statuses, 5)
|
||||
|
||||
assert.Equal(t, "ci/awesomeness", statuses[0].Context)
|
||||
assert.Equal(t, structs.CommitStatusPending, statuses[0].State)
|
||||
assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[0].APIURL())
|
||||
|
||||
assert.Equal(t, "cov/awesomeness", statuses[1].Context)
|
||||
assert.Equal(t, structs.CommitStatusWarning, statuses[1].State)
|
||||
assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[1].APIURL())
|
||||
|
||||
assert.Equal(t, "cov/awesomeness", statuses[2].Context)
|
||||
assert.Equal(t, structs.CommitStatusSuccess, statuses[2].State)
|
||||
assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[2].APIURL())
|
||||
|
||||
assert.Equal(t, "ci/awesomeness", statuses[3].Context)
|
||||
assert.Equal(t, structs.CommitStatusFailure, statuses[3].State)
|
||||
assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[3].APIURL())
|
||||
|
||||
assert.Equal(t, "deploy/awesomeness", statuses[4].Context)
|
||||
assert.Equal(t, structs.CommitStatusError, statuses[4].State)
|
||||
assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[4].APIURL())
|
||||
}
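For completeness, a sketch of reading statuses back and reducing them to one overall state, mirroring what the test above exercises (pagination values are arbitrary, the helper is hypothetical):

package example

import (
	"code.gitea.io/gitea/models/db"
	git_model "code.gitea.io/gitea/models/git"
	repo_model "code.gitea.io/gitea/models/repo"
)

// combinedState is illustrative: fetch the first page of statuses for a commit
// and derive a single overall status from them.
func combinedState(repo *repo_model.Repository, sha string) (*git_model.CommitStatus, error) {
	statuses, _, err := git_model.GetCommitStatuses(repo, sha, &git_model.CommitStatusOptions{
		ListOptions: db.ListOptions{Page: 1, PageSize: 50},
	})
	if err != nil {
		return nil, err
	}
	return git_model.CalcCommitStatus(statuses), nil
}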
328 models/git/lfs.go Normal file
@@ -0,0 +1,328 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
|
||||
// Use of this source code is governed by a MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package git
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"code.gitea.io/gitea/models/db"
|
||||
"code.gitea.io/gitea/models/perm"
|
||||
repo_model "code.gitea.io/gitea/models/repo"
|
||||
user_model "code.gitea.io/gitea/models/user"
|
||||
"code.gitea.io/gitea/modules/lfs"
|
||||
"code.gitea.io/gitea/modules/log"
|
||||
"code.gitea.io/gitea/modules/timeutil"
|
||||
|
||||
"xorm.io/builder"
|
||||
)
|
||||
|
||||
// ErrLFSLockNotExist represents a "LFSLockNotExist" kind of error.
|
||||
type ErrLFSLockNotExist struct {
|
||||
ID int64
|
||||
RepoID int64
|
||||
Path string
|
||||
}
|
||||
|
||||
// IsErrLFSLockNotExist checks if an error is a ErrLFSLockNotExist.
|
||||
func IsErrLFSLockNotExist(err error) bool {
|
||||
_, ok := err.(ErrLFSLockNotExist)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (err ErrLFSLockNotExist) Error() string {
|
||||
return fmt.Sprintf("lfs lock does not exist [id: %d, rid: %d, path: %s]", err.ID, err.RepoID, err.Path)
|
||||
}
|
||||
|
||||
// ErrLFSUnauthorizedAction represents a "LFSUnauthorizedAction" kind of error.
|
||||
type ErrLFSUnauthorizedAction struct {
|
||||
RepoID int64
|
||||
UserName string
|
||||
Mode perm.AccessMode
|
||||
}
|
||||
|
||||
// IsErrLFSUnauthorizedAction checks if an error is a ErrLFSUnauthorizedAction.
|
||||
func IsErrLFSUnauthorizedAction(err error) bool {
|
||||
_, ok := err.(ErrLFSUnauthorizedAction)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (err ErrLFSUnauthorizedAction) Error() string {
|
||||
if err.Mode == perm.AccessModeWrite {
|
||||
return fmt.Sprintf("User %s doesn't have write access for lfs lock [rid: %d]", err.UserName, err.RepoID)
|
||||
}
|
||||
return fmt.Sprintf("User %s doesn't have read access for lfs lock [rid: %d]", err.UserName, err.RepoID)
|
||||
}
|
||||
|
||||
// ErrLFSLockAlreadyExist represents a "LFSLockAlreadyExist" kind of error.
|
||||
type ErrLFSLockAlreadyExist struct {
|
||||
RepoID int64
|
||||
Path string
|
||||
}
|
||||
|
||||
// IsErrLFSLockAlreadyExist checks if an error is a ErrLFSLockAlreadyExist.
|
||||
func IsErrLFSLockAlreadyExist(err error) bool {
|
||||
_, ok := err.(ErrLFSLockAlreadyExist)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (err ErrLFSLockAlreadyExist) Error() string {
|
||||
return fmt.Sprintf("lfs lock already exists [rid: %d, path: %s]", err.RepoID, err.Path)
|
||||
}
|
||||
|
||||
// ErrLFSFileLocked represents a "LFSFileLocked" kind of error.
|
||||
type ErrLFSFileLocked struct {
|
||||
RepoID int64
|
||||
Path string
|
||||
UserName string
|
||||
}
|
||||
|
||||
// IsErrLFSFileLocked checks if an error is a ErrLFSFileLocked.
|
||||
func IsErrLFSFileLocked(err error) bool {
|
||||
_, ok := err.(ErrLFSFileLocked)
|
||||
return ok
|
||||
}
|
||||
|
||||
func (err ErrLFSFileLocked) Error() string {
|
||||
return fmt.Sprintf("File is lfs locked [repo: %d, locked by: %s, path: %s]", err.RepoID, err.UserName, err.Path)
|
||||
}
|
||||
|
||||
// LFSMetaObject stores metadata for LFS tracked files.
|
||||
type LFSMetaObject struct {
|
||||
ID int64 `xorm:"pk autoincr"`
|
||||
lfs.Pointer `xorm:"extends"`
|
||||
RepositoryID int64 `xorm:"UNIQUE(s) INDEX NOT NULL"`
|
||||
Existing bool `xorm:"-"`
|
||||
CreatedUnix timeutil.TimeStamp `xorm:"created"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
db.RegisterModel(new(LFSMetaObject))
|
||||
}
|
||||
|
||||
// LFSTokenResponse defines the JSON structure in which the JWT token is stored.
|
||||
// This structure is fetched via SSH and passed by the Git LFS client to the server
|
||||
// endpoint for authorization.
|
||||
type LFSTokenResponse struct {
|
||||
Header map[string]string `json:"header"`
|
||||
Href string `json:"href"`
|
||||
}
|
||||
|
||||
// ErrLFSObjectNotExist is returned from lfs models functions in order
|
||||
// to differentiate between database and missing object errors.
|
||||
var ErrLFSObjectNotExist = errors.New("LFS Meta object does not exist")
|
||||
|
||||
// NewLFSMetaObject stores a given populated LFSMetaObject structure in the database
|
||||
// if it is not already present.
|
||||
func NewLFSMetaObject(m *LFSMetaObject) (*LFSMetaObject, error) {
|
||||
var err error
|
||||
|
||||
ctx, committer, err := db.TxContext()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer committer.Close()
|
||||
|
||||
has, err := db.GetByBean(ctx, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if has {
|
||||
m.Existing = true
|
||||
return m, committer.Commit()
|
||||
}
|
||||
|
||||
if err = db.Insert(ctx, m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return m, committer.Commit()
|
||||
}
|
||||
|
||||
// GetLFSMetaObjectByOid selects a LFSMetaObject entry from database by its OID.
|
||||
// It may return ErrLFSObjectNotExist or a database error. If the error is nil,
|
||||
// the returned pointer is a valid LFSMetaObject.
|
||||
func GetLFSMetaObjectByOid(repoID int64, oid string) (*LFSMetaObject, error) {
|
||||
if len(oid) == 0 {
|
||||
return nil, ErrLFSObjectNotExist
|
||||
}
|
||||
|
||||
m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
|
||||
has, err := db.GetEngine(db.DefaultContext).Get(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !has {
|
||||
return nil, ErrLFSObjectNotExist
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// RemoveLFSMetaObjectByOid removes a LFSMetaObject entry from database by its OID.
|
||||
// It may return ErrLFSObjectNotExist or a database error.
|
||||
func RemoveLFSMetaObjectByOid(repoID int64, oid string) (int64, error) {
|
||||
if len(oid) == 0 {
|
||||
return 0, ErrLFSObjectNotExist
|
||||
}
|
||||
|
||||
ctx, committer, err := db.TxContext()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer committer.Close()
|
||||
|
||||
m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
|
||||
if _, err := db.DeleteByBean(ctx, m); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
count, err := db.CountByBean(ctx, &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
|
||||
if err != nil {
|
||||
return count, err
|
||||
}
|
||||
|
||||
return count, committer.Commit()
|
||||
}
|
||||
|
||||
// GetLFSMetaObjects returns all LFSMetaObjects associated with a repository
func GetLFSMetaObjects(repoID int64, page, pageSize int) ([]*LFSMetaObject, error) {
	sess := db.GetEngine(db.DefaultContext)

	if page >= 0 && pageSize > 0 {
		start := 0
		if page > 0 {
			start = (page - 1) * pageSize
		}
		sess.Limit(pageSize, start)
	}
	lfsObjects := make([]*LFSMetaObject, 0, pageSize)
	return lfsObjects, sess.Find(&lfsObjects, &LFSMetaObject{RepositoryID: repoID})
}

// CountLFSMetaObjects returns a count of all LFSMetaObjects associated with a repository
func CountLFSMetaObjects(repoID int64) (int64, error) {
	return db.GetEngine(db.DefaultContext).Count(&LFSMetaObject{RepositoryID: repoID})
}

// LFSObjectAccessible checks if a provided Oid is accessible to the user
func LFSObjectAccessible(user *user_model.User, oid string) (bool, error) {
	if user.IsAdmin {
		count, err := db.GetEngine(db.DefaultContext).Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
		return count > 0, err
	}
	cond := repo_model.AccessibleRepositoryCondition(user)
	count, err := db.GetEngine(db.DefaultContext).Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
	return count > 0, err
}

// LFSObjectIsAssociated checks if a provided Oid is associated with any repository
func LFSObjectIsAssociated(oid string) (bool, error) {
	return db.GetEngine(db.DefaultContext).Exist(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
}

// LFSAutoAssociate auto associates accessible LFSMetaObjects
func LFSAutoAssociate(metas []*LFSMetaObject, user *user_model.User, repoID int64) error {
	ctx, committer, err := db.TxContext()
	if err != nil {
		return err
	}
	defer committer.Close()

	sess := db.GetEngine(ctx)

	oids := make([]interface{}, len(metas))
	oidMap := make(map[string]*LFSMetaObject, len(metas))
	for i, meta := range metas {
		oids[i] = meta.Oid
		oidMap[meta.Oid] = meta
	}

	if !user.IsAdmin {
		newMetas := make([]*LFSMetaObject, 0, len(metas))
		cond := builder.In(
			"`lfs_meta_object`.repository_id",
			builder.Select("`repository`.id").From("repository").Where(repo_model.AccessibleRepositoryCondition(user)),
		)
		err = sess.Cols("oid").Where(cond).In("oid", oids...).GroupBy("oid").Find(&newMetas)
		if err != nil {
			return err
		}
		if len(newMetas) != len(oidMap) {
			return fmt.Errorf("unable to collect all LFS objects from database, expected %d, actually %d", len(oidMap), len(newMetas))
		}
		for i := range newMetas {
			newMetas[i].Size = oidMap[newMetas[i].Oid].Size
			newMetas[i].RepositoryID = repoID
		}
		if err = db.Insert(ctx, newMetas); err != nil {
			return err
		}
	} else {
		// admin can associate any LFS object with any repository, and we do not care about errors (e.g. a duplicated unique key):
		// even if an error occurs, it won't hurt users and won't make things worse
		for i := range metas {
			p := lfs.Pointer{Oid: metas[i].Oid, Size: metas[i].Size}
			_, err = sess.Insert(&LFSMetaObject{
				Pointer:      p,
				RepositoryID: repoID,
			})
			if err != nil {
				log.Warn("failed to insert LFS meta object %-v for repo_id: %d into database, err=%v", p, repoID, err)
			}
		}
	}
	return committer.Commit()
}

// IterateLFS iterates over all LFS meta objects in batches
func IterateLFS(f func(mo *LFSMetaObject) error) error {
	var start int
	const batchSize = 100
	e := db.GetEngine(db.DefaultContext)
	for {
		mos := make([]*LFSMetaObject, 0, batchSize)
		if err := e.Limit(batchSize, start).Find(&mos); err != nil {
			return err
		}
		if len(mos) == 0 {
			return nil
		}
		start += len(mos)

		for _, mo := range mos {
			if err := f(mo); err != nil {
				return err
			}
		}
	}
}

// CopyLFS copies LFS data from one repo to another
func CopyLFS(ctx context.Context, newRepo, oldRepo *repo_model.Repository) error {
	var lfsObjects []*LFSMetaObject
	if err := db.GetEngine(ctx).Where("repository_id=?", oldRepo.ID).Find(&lfsObjects); err != nil {
		return err
	}

	for _, v := range lfsObjects {
		v.ID = 0
		v.RepositoryID = newRepo.ID
		if err := db.Insert(ctx, v); err != nil {
			return err
		}
	}

	return nil
}

// GetRepoLFSSize returns the total size of a repository's LFS files
func GetRepoLFSSize(ctx context.Context, repoID int64) (int64, error) {
	lfsSize, err := db.GetEngine(ctx).Where("repository_id = ?", repoID).SumInt(new(LFSMetaObject), "size")
	if err != nil {
		return 0, fmt.Errorf("updateSize: GetLFSMetaObjects: %v", err)
	}
	return lfsSize, nil
}
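For orientation, a caller paginating a repository's LFS objects would combine the two helpers above roughly as in the following sketch. This is illustrative only: the surrounding handler, the repoID variable, and the chosen page size are assumptions, not part of this change.

	// Sketch, inside an assumed handler: list page 1 (20 objects) plus the total count.
	total, err := git_model.CountLFSMetaObjects(repoID)
	if err != nil {
		return err
	}
	metas, err := git_model.GetLFSMetaObjects(repoID, 1, 20)
	if err != nil {
		return err
	}
	log.Trace("repo %d has %d LFS objects, showing %d on this page", repoID, total, len(metas))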
183
models/git/lfs_lock.go
Normal file
@@ -0,0 +1,183 @@
// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package git

import (
	"context"
	"fmt"
	"path"
	"strings"
	"time"

	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/models/perm"
	access_model "code.gitea.io/gitea/models/perm/access"
	repo_model "code.gitea.io/gitea/models/repo"
	"code.gitea.io/gitea/models/unit"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/setting"
)

// LFSLock represents a git lfs lock of repository.
type LFSLock struct {
	ID      int64     `xorm:"pk autoincr"`
	RepoID  int64     `xorm:"INDEX NOT NULL"`
	OwnerID int64     `xorm:"INDEX NOT NULL"`
	Path    string    `xorm:"TEXT"`
	Created time.Time `xorm:"created"`
}

func init() {
	db.RegisterModel(new(LFSLock))
}

// BeforeInsert is invoked from XORM before inserting an object of this type.
func (l *LFSLock) BeforeInsert() {
	l.Path = cleanPath(l.Path)
}

func cleanPath(p string) string {
	return path.Clean("/" + p)[1:]
}

// CreateLFSLock creates a new lock.
func CreateLFSLock(repo *repo_model.Repository, lock *LFSLock) (*LFSLock, error) {
	dbCtx, committer, err := db.TxContext()
	if err != nil {
		return nil, err
	}
	defer committer.Close()

	if err := CheckLFSAccessForRepo(dbCtx, lock.OwnerID, repo, perm.AccessModeWrite); err != nil {
		return nil, err
	}

	lock.Path = cleanPath(lock.Path)
	lock.RepoID = repo.ID

	l, err := GetLFSLock(dbCtx, repo, lock.Path)
	if err == nil {
		return l, ErrLFSLockAlreadyExist{lock.RepoID, lock.Path}
	}
	if !IsErrLFSLockNotExist(err) {
		return nil, err
	}

	if err := db.Insert(dbCtx, lock); err != nil {
		return nil, err
	}

	return lock, committer.Commit()
}

// GetLFSLock returns the lock of the given path.
func GetLFSLock(ctx context.Context, repo *repo_model.Repository, path string) (*LFSLock, error) {
	path = cleanPath(path)
	rel := &LFSLock{RepoID: repo.ID}
	has, err := db.GetEngine(ctx).Where("lower(path) = ?", strings.ToLower(path)).Get(rel)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, ErrLFSLockNotExist{0, repo.ID, path}
	}
	return rel, nil
}

// GetLFSLockByID returns the lock with the given id.
func GetLFSLockByID(ctx context.Context, id int64) (*LFSLock, error) {
	lock := new(LFSLock)
	has, err := db.GetEngine(ctx).ID(id).Get(lock)
	if err != nil {
		return nil, err
	} else if !has {
		return nil, ErrLFSLockNotExist{id, 0, ""}
	}
	return lock, nil
}

// GetLFSLockByRepoID returns a list of locks of repository.
func GetLFSLockByRepoID(repoID int64, page, pageSize int) ([]*LFSLock, error) {
	e := db.GetEngine(db.DefaultContext)
	if page >= 0 && pageSize > 0 {
		start := 0
		if page > 0 {
			start = (page - 1) * pageSize
		}
		e.Limit(pageSize, start)
	}
	lfsLocks := make([]*LFSLock, 0, pageSize)
	return lfsLocks, e.Find(&lfsLocks, &LFSLock{RepoID: repoID})
}

// GetTreePathLock returns the LFS lock for the given treePath
func GetTreePathLock(repoID int64, treePath string) (*LFSLock, error) {
	if !setting.LFS.StartServer {
		return nil, nil
	}

	locks, err := GetLFSLockByRepoID(repoID, 0, 0)
	if err != nil {
		return nil, err
	}
	for _, lock := range locks {
		if lock.Path == treePath {
			return lock, nil
		}
	}
	return nil, nil
}

// CountLFSLockByRepoID returns a count of all LFSLocks associated with a repository.
func CountLFSLockByRepoID(repoID int64) (int64, error) {
	return db.GetEngine(db.DefaultContext).Count(&LFSLock{RepoID: repoID})
}

// DeleteLFSLockByID deletes a lock by given ID.
func DeleteLFSLockByID(id int64, repo *repo_model.Repository, u *user_model.User, force bool) (*LFSLock, error) {
	dbCtx, committer, err := db.TxContext()
	if err != nil {
		return nil, err
	}
	defer committer.Close()

	lock, err := GetLFSLockByID(dbCtx, id)
	if err != nil {
		return nil, err
	}

	if err := CheckLFSAccessForRepo(dbCtx, u.ID, repo, perm.AccessModeWrite); err != nil {
		return nil, err
	}

	if !force && u.ID != lock.OwnerID {
		return nil, fmt.Errorf("user doesn't own lock and force flag is not set")
	}

	if _, err := db.GetEngine(dbCtx).ID(id).Delete(new(LFSLock)); err != nil {
		return nil, err
	}

	return lock, committer.Commit()
}

// CheckLFSAccessForRepo checks that the given user has the required access mode for the repository
func CheckLFSAccessForRepo(ctx context.Context, ownerID int64, repo *repo_model.Repository, mode perm.AccessMode) error {
	if ownerID == 0 {
		return ErrLFSUnauthorizedAction{repo.ID, "undefined", mode}
	}
	u, err := user_model.GetUserByIDCtx(ctx, ownerID)
	if err != nil {
		return err
	}
	perm, err := access_model.GetUserRepoPermission(ctx, repo, u)
	if err != nil {
		return err
	}
	if !perm.CanAccess(mode, unit.TypeCode) {
		return ErrLFSUnauthorizedAction{repo.ID, u.DisplayName(), mode}
	}
	return nil
}
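As a quick illustration of the lock lifecycle these helpers provide, a caller could acquire and later release a lock roughly as below. This is a minimal sketch: repo, doer, the path, and the IsErrLFSLockAlreadyExist helper (assumed to exist alongside the error types used above) are illustrative assumptions, not part of this change.

	// Sketch: create a lock on a path, then release it as the same user.
	lock, err := git_model.CreateLFSLock(repo, &git_model.LFSLock{
		Path:    "assets/logo.png", // hypothetical path
		OwnerID: doer.ID,
	})
	if git_model.IsErrLFSLockAlreadyExist(err) {
		// someone already holds the lock; "lock" describes the existing one
	} else if err != nil {
		return err
	}
	// later: force=false means only the owner may unlock
	if _, err := git_model.DeleteLFSLockByID(lock.ID, repo, doer, false); err != nil {
		return err
	}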
18
models/git/main_test.go
Normal file
@@ -0,0 +1,18 @@
// Copyright 2020 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package git_test

import (
	"path/filepath"
	"testing"

	"code.gitea.io/gitea/models/unittest"
)

func TestMain(m *testing.M) {
	unittest.MainTest(m, &unittest.TestOptions{
		GiteaRootPath: filepath.Join("..", ".."),
	})
}
137
models/git/protected_tag.go
Normal file
@@ -0,0 +1,137 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package git

import (
	"regexp"
	"strings"

	"code.gitea.io/gitea/models/db"
	"code.gitea.io/gitea/models/organization"
	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/timeutil"

	"github.com/gobwas/glob"
)

// ProtectedTag struct
type ProtectedTag struct {
	ID               int64 `xorm:"pk autoincr"`
	RepoID           int64
	NamePattern      string
	RegexPattern     *regexp.Regexp `xorm:"-"`
	GlobPattern      glob.Glob      `xorm:"-"`
	AllowlistUserIDs []int64        `xorm:"JSON TEXT"`
	AllowlistTeamIDs []int64        `xorm:"JSON TEXT"`

	CreatedUnix timeutil.TimeStamp `xorm:"created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
}

func init() {
	db.RegisterModel(new(ProtectedTag))
}

// EnsureCompiledPattern ensures the name pattern is compiled, as a regex if it is wrapped in slashes, otherwise as a glob
func (pt *ProtectedTag) EnsureCompiledPattern() error {
	if pt.RegexPattern != nil || pt.GlobPattern != nil {
		return nil
	}

	var err error
	if len(pt.NamePattern) >= 2 && strings.HasPrefix(pt.NamePattern, "/") && strings.HasSuffix(pt.NamePattern, "/") {
		pt.RegexPattern, err = regexp.Compile(pt.NamePattern[1 : len(pt.NamePattern)-1])
	} else {
		pt.GlobPattern, err = glob.Compile(pt.NamePattern)
	}
	return err
}

func (pt *ProtectedTag) matchString(name string) bool {
	if pt.RegexPattern != nil {
		return pt.RegexPattern.MatchString(name)
	}
	return pt.GlobPattern.Match(name)
}

// InsertProtectedTag inserts a protected tag to database
func InsertProtectedTag(pt *ProtectedTag) error {
	_, err := db.GetEngine(db.DefaultContext).Insert(pt)
	return err
}

// UpdateProtectedTag updates the protected tag
func UpdateProtectedTag(pt *ProtectedTag) error {
	_, err := db.GetEngine(db.DefaultContext).ID(pt.ID).AllCols().Update(pt)
	return err
}

// DeleteProtectedTag deletes a protected tag by ID
func DeleteProtectedTag(pt *ProtectedTag) error {
	_, err := db.GetEngine(db.DefaultContext).ID(pt.ID).Delete(&ProtectedTag{})
	return err
}

// IsUserAllowedModifyTag returns true if the user is allowed to modify the tag
func IsUserAllowedModifyTag(pt *ProtectedTag, userID int64) (bool, error) {
	if base.Int64sContains(pt.AllowlistUserIDs, userID) {
		return true, nil
	}

	if len(pt.AllowlistTeamIDs) == 0 {
		return false, nil
	}

	in, err := organization.IsUserInTeams(db.DefaultContext, userID, pt.AllowlistTeamIDs)
	if err != nil {
		return false, err
	}
	return in, nil
}

// GetProtectedTags gets all protected tags of the repository
func GetProtectedTags(repoID int64) ([]*ProtectedTag, error) {
	tags := make([]*ProtectedTag, 0)
	return tags, db.GetEngine(db.DefaultContext).Find(&tags, &ProtectedTag{RepoID: repoID})
}

// GetProtectedTagByID gets the protected tag with the specific id
func GetProtectedTagByID(id int64) (*ProtectedTag, error) {
	tag := new(ProtectedTag)
	has, err := db.GetEngine(db.DefaultContext).ID(id).Get(tag)
	if err != nil {
		return nil, err
	}
	if !has {
		return nil, nil
	}
	return tag, nil
}

// IsUserAllowedToControlTag checks if a user can control the specific tag.
// It returns true if the tag name is not protected or the user is allowed to control it.
func IsUserAllowedToControlTag(tags []*ProtectedTag, tagName string, userID int64) (bool, error) {
	isAllowed := true
	for _, tag := range tags {
		err := tag.EnsureCompiledPattern()
		if err != nil {
			return false, err
		}

		if !tag.matchString(tagName) {
			continue
		}

		isAllowed, err = IsUserAllowedModifyTag(tag, userID)
		if err != nil {
			return false, err
		}
		if isAllowed {
			break
		}
	}

	return isAllowed, nil
}
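To illustrate the pattern convention used above (a slash-wrapped pattern such as /.../ is compiled as a regex, anything else as a glob), a pre-receive style check could be assembled roughly as in this sketch; repoID, the tag name, and userID are placeholders for illustration only.

	// Sketch: decide whether a push of tag "v1.2.3" by userID may proceed.
	tags, err := git_model.GetProtectedTags(repoID)
	if err != nil {
		return err
	}
	// "v*" would be compiled as a glob; `/\Av\d+\./` (slash-wrapped) as a regex.
	ok, err := git_model.IsUserAllowedToControlTag(tags, "v1.2.3", userID)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("tag v1.2.3 is protected") // hypothetical rejection
	}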
165
models/git/protected_tag_test.go
Normal file
@@ -0,0 +1,165 @@
// Copyright 2021 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package git_test

import (
	"testing"

	git_model "code.gitea.io/gitea/models/git"
	"code.gitea.io/gitea/models/unittest"

	"github.com/stretchr/testify/assert"
)

func TestIsUserAllowed(t *testing.T) {
	assert.NoError(t, unittest.PrepareTestDatabase())

	pt := &git_model.ProtectedTag{}
	allowed, err := git_model.IsUserAllowedModifyTag(pt, 1)
	assert.NoError(t, err)
	assert.False(t, allowed)

	pt = &git_model.ProtectedTag{
		AllowlistUserIDs: []int64{1},
	}
	allowed, err = git_model.IsUserAllowedModifyTag(pt, 1)
	assert.NoError(t, err)
	assert.True(t, allowed)

	allowed, err = git_model.IsUserAllowedModifyTag(pt, 2)
	assert.NoError(t, err)
	assert.False(t, allowed)

	pt = &git_model.ProtectedTag{
		AllowlistTeamIDs: []int64{1},
	}
	allowed, err = git_model.IsUserAllowedModifyTag(pt, 1)
	assert.NoError(t, err)
	assert.False(t, allowed)

	allowed, err = git_model.IsUserAllowedModifyTag(pt, 2)
	assert.NoError(t, err)
	assert.True(t, allowed)

	pt = &git_model.ProtectedTag{
		AllowlistUserIDs: []int64{1},
		AllowlistTeamIDs: []int64{1},
	}
	allowed, err = git_model.IsUserAllowedModifyTag(pt, 1)
	assert.NoError(t, err)
	assert.True(t, allowed)

	allowed, err = git_model.IsUserAllowedModifyTag(pt, 2)
	assert.NoError(t, err)
	assert.True(t, allowed)
}

func TestIsUserAllowedToControlTag(t *testing.T) {
	cases := []struct {
		name    string
		userid  int64
		allowed bool
	}{
		{
			name:    "test",
			userid:  1,
			allowed: true,
		},
		{
			name:    "test",
			userid:  3,
			allowed: true,
		},
		{
			name:    "gitea",
			userid:  1,
			allowed: true,
		},
		{
			name:    "gitea",
			userid:  3,
			allowed: false,
		},
		{
			name:    "test-gitea",
			userid:  1,
			allowed: true,
		},
		{
			name:    "test-gitea",
			userid:  3,
			allowed: false,
		},
		{
			name:    "gitea-test",
			userid:  1,
			allowed: true,
		},
		{
			name:    "gitea-test",
			userid:  3,
			allowed: true,
		},
		{
			name:    "v-1",
			userid:  1,
			allowed: false,
		},
		{
			name:    "v-1",
			userid:  2,
			allowed: true,
		},
		{
			name:    "release",
			userid:  1,
			allowed: false,
		},
	}

	t.Run("Glob", func(t *testing.T) {
		protectedTags := []*git_model.ProtectedTag{
			{
				NamePattern:      `*gitea`,
				AllowlistUserIDs: []int64{1},
			},
			{
				NamePattern:      `v-*`,
				AllowlistUserIDs: []int64{2},
			},
			{
				NamePattern: "release",
			},
		}

		for n, c := range cases {
			isAllowed, err := git_model.IsUserAllowedToControlTag(protectedTags, c.name, c.userid)
			assert.NoError(t, err)
			assert.Equal(t, c.allowed, isAllowed, "case %d: allowed should match", n)
		}
	})

	t.Run("Regex", func(t *testing.T) {
		protectedTags := []*git_model.ProtectedTag{
			{
				NamePattern:      `/gitea\z/`,
				AllowlistUserIDs: []int64{1},
			},
			{
				NamePattern:      `/\Av-/`,
				AllowlistUserIDs: []int64{2},
			},
			{
				NamePattern: "/release/",
			},
		}

		for n, c := range cases {
			isAllowed, err := git_model.IsUserAllowedToControlTag(protectedTags, c.name, c.userid)
			assert.NoError(t, err)
			assert.Equal(t, c.allowed, isAllowed, "case %d: allowed should match", n)
		}
	})
}