@@ -99,38 +99,8 @@ func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRep
 	}
 
 	// Copy uploaded files into repository.
-	for i, uploadInfo := range infos {
-		file, err := os.Open(uploadInfo.upload.LocalPath())
-		if err != nil {
-			return err
-		}
-		defer file.Close()
-
-		var objectHash string
-		if setting.LFS.StartServer && filename2attribute2info[uploadInfo.upload.Name] != nil && filename2attribute2info[uploadInfo.upload.Name]["filter"] == "lfs" {
-			// Handle LFS
-			// FIXME: Inefficient! this should probably happen in models.Upload
-			oid, err := models.GenerateLFSOid(file)
-			if err != nil {
-				return err
-			}
-
-			fileInfo, err := file.Stat()
-			if err != nil {
-				return err
-			}
-
-			uploadInfo.lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: fileInfo.Size(), RepositoryID: t.repo.ID}
-			if objectHash, err = t.HashObject(strings.NewReader(uploadInfo.lfsMetaObject.Pointer())); err != nil {
-				return err
-			}
-			infos[i] = uploadInfo
-		} else if objectHash, err = t.HashObject(file); err != nil {
-			return err
-		}
-
-		// Add the object to the index
-		if err := t.AddObjectToIndex("100644", objectHash, path.Join(opts.TreePath, uploadInfo.upload.Name)); err != nil {
+	for i := range infos {
+		if err := copyUploadedLFSFileIntoRepository(&infos[i], filename2attribute2info, t, opts.TreePath); err != nil {
 			return err
 		}
 	}
@@ -152,11 +122,11 @@ func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRep
 	}
 
 	// Now deal with LFS objects
-	for _, uploadInfo := range infos {
-		if uploadInfo.lfsMetaObject == nil {
+	for i := range infos {
+		if infos[i].lfsMetaObject == nil {
 			continue
 		}
-		uploadInfo.lfsMetaObject, err = models.NewLFSMetaObject(uploadInfo.lfsMetaObject)
+		infos[i].lfsMetaObject, err = models.NewLFSMetaObject(infos[i].lfsMetaObject)
 		if err != nil {
 			// OK Now we need to cleanup
 			return cleanUpAfterFailure(&infos, t, err)
@@ -182,6 +152,39 @@ func UploadRepoFiles(repo *models.Repository, doer *models.User, opts *UploadRep
 	return models.DeleteUploads(uploads...)
 }
 
+func copyUploadedLFSFileIntoRepository(info *uploadInfo, filename2attribute2info map[string]map[string]string, t *TemporaryUploadRepository, treePath string) error {
+	file, err := os.Open(info.upload.LocalPath())
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	var objectHash string
+	if setting.LFS.StartServer && filename2attribute2info[info.upload.Name] != nil && filename2attribute2info[info.upload.Name]["filter"] == "lfs" {
+		// Handle LFS
+		// FIXME: Inefficient! this should probably happen in models.Upload
+		oid, err := models.GenerateLFSOid(file)
+		if err != nil {
+			return err
+		}
+
+		fileInfo, err := file.Stat()
+		if err != nil {
+			return err
+		}
+
+		info.lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: fileInfo.Size(), RepositoryID: t.repo.ID}
+		if objectHash, err = t.HashObject(strings.NewReader(info.lfsMetaObject.Pointer())); err != nil {
+			return err
+		}
+	} else if objectHash, err = t.HashObject(file); err != nil {
+		return err
+	}
+
+	// Add the object to the index
+	return t.AddObjectToIndex("100644", objectHash, path.Join(treePath, info.upload.Name))
+}
+
 func uploadToLFSContentStore(info uploadInfo, contentStore *lfs.ContentStore) error {
 	if info.lfsMetaObject == nil {
 		return nil