From 115e3dab9571682afa9b9baea4648a1f82369fa0 Mon Sep 17 00:00:00 2001 From: jenken827 Date: Sat, 23 Aug 2025 18:44:23 +0800 Subject: [PATCH 01/26] feat:slice upload --- drivers/123_open/driver.go | 62 +++++ drivers/123_open/types.go | 104 +++++++- drivers/123_open/upload.go | 62 +++++ drivers/123_open/util.go | 28 ++- drivers/baidu_netdisk/driver.go | 71 ++++++ drivers/baidu_netdisk/types.go | 46 ++++ drivers/baidu_netdisk/util.go | 32 +++ internal/conf/const.go | 1 + internal/db/db.go | 3 +- internal/db/slice_upload.go | 19 ++ internal/driver/driver.go | 24 ++ internal/fs/fs.go | 344 ++++++++++++++++++++++++++ internal/model/obj.go | 6 + internal/model/reqres/upload.go | 41 +++ internal/model/tables/base.go | 10 + internal/model/tables/slice_upload.go | 61 +++++ internal/model/upload.go | 17 ++ server/handles/fsup.go | 100 ++++++++ server/middlewares/fs.go | 94 +++++++ server/middlewares/fsup.go | 45 ---- server/router.go | 6 + 21 files changed, 1106 insertions(+), 70 deletions(-) create mode 100644 internal/db/slice_upload.go create mode 100644 internal/model/reqres/upload.go create mode 100644 internal/model/tables/base.go create mode 100644 internal/model/tables/slice_upload.go create mode 100644 internal/model/upload.go create mode 100644 server/middlewares/fs.go delete mode 100644 server/middlewares/fsup.go diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go index 04785ac1e..9af7da8b4 100644 --- a/drivers/123_open/driver.go +++ b/drivers/123_open/driver.go @@ -3,12 +3,16 @@ package _123_open import ( "context" "fmt" + "io" "strconv" + "strings" "time" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/stream" 
"github.com/OpenListTeam/OpenList/v4/pkg/utils" @@ -27,6 +31,13 @@ func (d *Open123) GetAddition() driver.Additional { return &d.Addition } +func (d *Open123) GetUploadInfo() *model.UploadInfo { + return &model.UploadInfo{ + SliceHashNeed: true, + HashMd5Need: true, + } +} + func (d *Open123) Init(ctx context.Context) error { if d.UploadThread < 1 || d.UploadThread > 32 { d.UploadThread = 3 @@ -213,5 +224,56 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre return nil, fmt.Errorf("upload complete timeout") } +// Preup 预上传 +func (d *Open123) Preup(c context.Context, srcobj model.Obj, req *reqres.PreupReq) (*model.PreupInfo, error) { + pid, err := strconv.ParseUint(srcobj.GetID(), 10, 64) + if err != nil { + return nil, err + } + duplicate := 1 + if req.Overwrite { + duplicate = 2 + } + + ucr := &UploadCreateReq{ + ParentFileID: pid, + Etag: req.Hash.Md5, + FileName: req.Name, + Size: int64(req.Size), + Duplicate: duplicate, + } + + resp, err := d.uploadCreate(ucr) + if err != nil { + return nil, err + } + return &model.PreupInfo{ + PreupID: resp.PreuploadID, + Server: resp.Servers[0], + SliceSize: resp.SliceSize, + Reuse: resp.Reuse, + }, nil +} + +// UploadSlice 上传分片 +func (d *Open123) SliceUpload(c context.Context, req *tables.SliceUpload, sliceno uint, fd io.Reader) error { + sh := strings.Split(req.SliceHash, ",") + r := &UploadSliceReq{ + Name: req.Name, + PreuploadID: req.PreupID, + Server: req.Server, + Slice: fd, + SliceMD5: sh[sliceno], + SliceNo: int(sliceno) + 1, + } + return d.uploadSlice(r) +} + +// UploadSliceComplete 分片上传完成 +func (d *Open123) UploadSliceComplete(c context.Context, su *tables.SliceUpload) error { + + return d.sliceUpComplete(su.PreupID) +} + var _ driver.Driver = (*Open123)(nil) var _ driver.PutResult = (*Open123)(nil) diff --git a/drivers/123_open/types.go b/drivers/123_open/types.go index eb08529f1..d6245d58b 100644 --- a/drivers/123_open/types.go +++ b/drivers/123_open/types.go @@ -1,6 +1,7 @@ 
package _123_open import ( + "io" "strconv" "time" @@ -165,18 +166,6 @@ type DirectLinkResp struct { } `json:"data"` } -// 创建文件V2返回 -type UploadCreateResp struct { - BaseResp - Data struct { - FileID int64 `json:"fileID"` - PreuploadID string `json:"preuploadID"` - Reuse bool `json:"reuse"` - SliceSize int64 `json:"sliceSize"` - Servers []string `json:"servers"` - } `json:"data"` -} - // 上传完毕V2返回 type UploadCompleteResp struct { BaseResp @@ -185,3 +174,94 @@ type UploadCompleteResp struct { FileID int64 `json:"fileID"` } `json:"data"` } + +// UploadCreateReq 预上传请求 +// parentFileID number 必填 父目录id,上传到根目录时填写 0 +// filename string 必填 文件名要小于255个字符且不能包含以下任何字符:"\/:*?|><。(注:不能重名) +// containDir 为 true 时,传入路径+文件名,例如:/你好/123/测试文件.mp4 +// etag string 必填 文件md5 +// size number 必填 文件大小,单位为 byte 字节 +// duplicate number 非必填 当有相同文件名时,文件处理策略(1保留两者,新文件名将自动添加后缀,2覆盖原文件) +// containDir bool 非必填 上传文件是否包含路径,默认false +type UploadCreateReq struct { + ParentFileID uint64 `json:"parentFileID"` + FileName string `json:"filename"` + Etag string `json:"etag"` + Size int64 `json:"size"` + Duplicate int `json:"duplicate"` + ContainDir bool `json:"containDir"` +} + +type UploadCreateResp struct { + BaseResp + Data UploadCreateData `json:"data"` +} + +// UploadCreateData 预上传响应 +// fileID number 非必填 文件ID。当123云盘已有该文件,则会发生秒传。此时会将文件ID字段返回。唯一 +// preuploadID string 必填 预上传ID(如果 reuse 为 true 时,该字段不存在) +// reuse boolean 必填 是否秒传,返回true时表示文件已上传成功 +// sliceSize number 必填 分片大小,必须按此大小生成文件分片再上传 +// servers array 必填 上传地址 +type UploadCreateData struct { + FileID int64 `json:"fileID"` + PreuploadID string `json:"preuploadID"` + Reuse bool `json:"reuse"` + SliceSize int64 `json:"sliceSize"` + Servers []string `json:"servers"` +} + +// UploadSliceReq 分片上传请求 +// preuploadID string 必填 预上传ID +// sliceNo number 必填 分片序号,从1开始自增 +// sliceMD5 string 必填 当前分片md5 +// slice file 必填 分片二进制流 +type UploadSliceReq struct { + Name string `json:"name"` + PreuploadID string `json:"preuploadID"` + SliceNo int `json:"sliceNo"` + SliceMD5 
string `json:"sliceMD5"` + Slice io.Reader `json:"slice"` + Server string `json:"server"` +} + +type SliceUpCompleteResp struct { + SingleUploadResp +} + +type GetUploadServerResp struct { + BaseResp + Data []string `json:"data"` +} + +// SingleUploadReq 单文件上传请求 +// parentFileID number 必填 父目录id,上传到根目录时填写 0 +// filename string 必填 文件名要小于255个字符且不能包含以下任何字符:"\/:*?|><。(注:不能重名) +// +// containDir 为 true 时,传入路径+文件名,例如:/你好/123/测试文件.mp4 +// +// etag string 必填 文件md5 +// size number 必填 文件大小,单位为 byte 字节 +// file file 必填 文件二进制流 +// duplicate number 非必填 当有相同文件名时,文件处理策略(1保留两者,新文件名将自动添加后缀,2覆盖原文件) +// containDir bool 非必填 上传文件是否包含路径,默认false +type SingleUploadReq struct { + ParentFileID int64 `json:"parentFileID"` + FileName string `json:"filename"` + Etag string `json:"etag"` + Size int64 `json:"size"` + File io.Reader `json:"file"` + Duplicate int `json:"duplicate"` + ContainDir bool `json:"containDir"` +} + +// SingleUploadResp 单文件上传响应 +type SingleUploadResp struct { + BaseResp + Data SingleUploadData `json:"data"` +} + +type SingleUploadData struct { + FileID int64 `json:"fileID"` + Completed bool `json:"completed"` +} diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go index abcde2aaf..6de0088ab 100644 --- a/drivers/123_open/upload.go +++ b/drivers/123_open/upload.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "mime/multipart" @@ -20,6 +21,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/avast/retry-go" "github.com/go-resty/resty/v2" + log "github.com/sirupsen/logrus" ) // 创建文件 V2 @@ -183,3 +185,63 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) { } return &resp, nil } + +func (d *Open123) uploadSlice(req *UploadSliceReq) error { + _, err := d.Request(InitApiInfo(req.Server+"/upload/v2/file/slice", 0), http.MethodPost, func(rt *resty.Request) { + rt.SetHeader("Content-Type", "multipart/form-data") + rt.SetMultipartFormData(map[string]string{ + "preuploadID": 
req.PreuploadID, + "sliceMD5": req.SliceMD5, + "sliceNo": strconv.FormatInt(int64(req.SliceNo), 10), + }) + rt.SetMultipartField("slice", req.Name, "multipart/form-data", req.Slice) + }, nil) + return err +} + +func (d *Open123) sliceUpComplete(uploadID string) error { + r := &SliceUpCompleteResp{} + + b, err := d.Request(UploadComplete, http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "preuploadID": uploadID, + }) + }, r) + if err != nil { + log.Error("123 open uploadComplete error", err) + return err + } + log.Infof("upload complete,body: %s", string(b)) + if r.Data.Completed { + return nil + } + + return errors.New("upload uncomplete") + +} + +func (d *Open123) getUploadServer() (string, error) { + r := &GetUploadServerResp{} + body, err := d.Request(UploadFileDomain, "GET", nil, r) + if err != nil { + log.Error("get upload server failed", string(body), r, err) + return "", err + } + if len(r.Data) == 0 { + return "", errors.New("upload server is empty") + } + + return r.Data[0], err +} + +func (d *Open123) uploadCreate(uc *UploadCreateReq) (*UploadCreateData, error) { + r := &UploadCreateResp{} + _, err := d.Request(UploadCreate, http.MethodPost, func(req *resty.Request) { + req.SetBody(uc) + }, r) + if err != nil { + log.Error("123 open uploadCreate error", err) + } + return &r.Data, err + +} diff --git a/drivers/123_open/util.go b/drivers/123_open/util.go index 52bb5ee87..a7ce87010 100644 --- a/drivers/123_open/util.go +++ b/drivers/123_open/util.go @@ -21,18 +21,19 @@ import ( var ( //不同情况下获取的AccessTokenQPS限制不同 如下模块化易于拓展 Api = "https://open-api.123pan.com" - AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1) - RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1) - UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1) - FileList = InitApiInfo(Api+"/api/v2/file/list", 3) - DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5) - DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5) - Mkdir = 
InitApiInfo(Api+"/upload/v1/file/mkdir", 2) - Move = InitApiInfo(Api+"/api/v1/file/move", 1) - Rename = InitApiInfo(Api+"/api/v1/file/name", 1) - Trash = InitApiInfo(Api+"/api/v1/file/trash", 2) - UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2) - UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0) + AccessToken = InitApiInfo(Api+"/api/v1/access_token", 1) + RefreshToken = InitApiInfo(Api+"/api/v1/oauth2/access_token", 1) + UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1) + FileList = InitApiInfo(Api+"/api/v2/file/list", 3) + DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5) + DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5) + Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2) + Move = InitApiInfo(Api+"/api/v1/file/move", 1) + Rename = InitApiInfo(Api+"/api/v1/file/name", 1) + Trash = InitApiInfo(Api+"/api/v1/file/trash", 2) + UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2) + UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0) + UploadFileDomain = InitApiInfo(Api+"/upload/v2/file/domain", 0) ) func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) { @@ -78,7 +79,10 @@ func (d *Open123) Request(apiInfo *ApiInfo, method string, callback base.ReqCall } else if baseResp.Code == 429 { time.Sleep(500 * time.Millisecond) log.Warningf("API: %s, QPS: %d, 请求太频繁,对应API提示过多请减小QPS", apiInfo.url, apiInfo.qps) + } else if baseResp.Code == 20103 { //code: 20103, error: 文件正在校验中,请间隔1秒后再试 + time.Sleep(2 * time.Second) } else { + log.Errorf("API: %s, body:%s, code: %d, error: %s", apiInfo.url, res.Body(), baseResp.Code, baseResp.Message) return nil, errors.New(baseResp.Message) } } diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 0fa94e885..0c60cebbb 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -4,12 +4,15 @@ import ( "context" "crypto/md5" "encoding/hex" + 
"encoding/json" "errors" "io" "net/url" "os" stdpath "path" + "path/filepath" "strconv" + "strings" "time" "github.com/OpenListTeam/OpenList/v4/drivers/base" @@ -17,6 +20,8 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "github.com/OpenListTeam/OpenList/v4/pkg/errgroup" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/avast/retry-go" @@ -39,6 +44,14 @@ func (d *BaiduNetdisk) GetAddition() driver.Additional { return &d.Addition } +func (d *BaiduNetdisk) GetUploadInfo() *model.UploadInfo { + return &model.UploadInfo{ + SliceHashNeed: true, + HashMd5Need: true, + HashMd5256KBNeed: true, + } +} + func (d *BaiduNetdisk) Init(ctx context.Context) error { d.uploadThread, _ = strconv.Atoi(d.UploadThread) if d.uploadThread < 1 || d.uploadThread > 32 { @@ -364,4 +377,62 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string return nil } +// SliceUpload 上传分片 +func (d *BaiduNetdisk) SliceUpload(c context.Context, req *tables.SliceUpload, sliceno uint, fd io.Reader) error { + fp := filepath.Join(req.DstPath, req.Name) + if sliceno == 0 { //第一个分片需要先执行预上传 + rtype := 1 + if req.Overwrite { + rtype = 3 + } + precreateResp, err := d.precreate(&PrecreateReq{ + Path: fp, + Size: req.Size, + Isdir: 0, + BlockList: strings.Split(req.SliceHash, ","), + Autoinit: 1, + Rtype: rtype, + ContentMd5: req.HashMd5, + SliceMd5: req.HashMd5256KB, + }) + if err != nil { + return err + } + req.PreupID = precreateResp.Uploadid + } + err := d.uploadSlice(c, map[string]string{ + "method": "upload", + "access_token": d.AccessToken, + "type": "tmpfile", + "path": fp, + "uploadid": req.PreupID, + "partseq": strconv.Itoa(int(sliceno)), + }, req.Name, fd) + return err + +} + +// Preup 预上传(自定以接口,为了适配自定义的分片上传) +func (d 
*BaiduNetdisk) Preup(ctx context.Context, srcobj model.Obj, req *reqres.PreupReq) (*model.PreupInfo, error) { + return &model.PreupInfo{ + SliceSize: d.getSliceSize(req.Size), + }, nil +} + +// UploadSliceComplete 分片上传完成 +func (d *BaiduNetdisk) UploadSliceComplete(ctx context.Context, su *tables.SliceUpload) error { + fp := filepath.Join(su.DstPath, su.Name) + rsp := &SliceUpCompleteResp{} + t := time.Now().Unix() + sh, err := json.Marshal(strings.Split(su.SliceHash, ",")) + if err != nil { + return err + } + b, err := d.create(fp, int64(su.Size), 0, su.PreupID, string(sh), rsp, t, t) + if err != nil { + log.Error(err, rsp, string(b)) + } + return err +} + var _ driver.Driver = (*BaiduNetdisk)(nil) diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index ec8ceabc7..8de3091d1 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -189,3 +189,49 @@ type PrecreateResp struct { // return_type=2 File File `json:"info"` } + +// PrecreateReq 预上传请求 +type PrecreateReq struct { + Path string `json:"path"` // 上传后使用的文件绝对路径(需urlencode) + Size int64 `json:"size"` // 文件或目录大小,单位B + Isdir int `json:"isdir"` // 是否为目录,0 文件,1 目录 + BlockList []string `json:"block_list"` // 文件各分片MD5数组的json串 + Autoinit int `json:"autoinit"` // 固定值1 + Rtype int `json:"rtype,omitempty"` // 文件命名策略,非必填 + Uploadid string `json:"uploadid,omitempty"` // 上传ID,非必填 + ContentMd5 string `json:"content-md5,omitempty"` // 文件MD5,非必填 + SliceMd5 string `json:"slice-md5,omitempty"` // 文件校验段的MD5,非必填 + LocalCtime string `json:"local_ctime,omitempty"` // 客户端创建时间,非必填 + LocalMtime string `json:"local_mtime,omitempty"` // 客户端修改时间,非必填 +} + +// SliceupCompleteReq 分片上传完成请求 +type SliceUpCompleteReq struct { + Path string `json:"path"` // 上传后使用的文件绝对路径(需urlencode),与预上传precreate接口中的path保持一致 + Size int64 `json:"size"` // 文件或目录的大小,必须与实际大小一致 + Isdir int `json:"isdir"` // 是否目录,0 文件、1 目录,与预上传precreate接口中的isdir保持一致 + BlockList []string `json:"block_list"` // 
文件各分片md5数组的json串,与预上传precreate接口中的block_list保持一致 + Uploadid string `json:"uploadid"` // 预上传precreate接口下发的uploadid + Rtype int `json:"rtype,omitempty"` // 文件命名策略,默认0 + LocalCtime int64 `json:"local_ctime,omitempty"` // 客户端创建时间(精确到秒),默认为当前时间戳 + LocalMtime int64 `json:"local_mtime,omitempty"` // 客户端修改时间(精确到秒),默认为当前时间戳 + ZipQuality int `json:"zip_quality,omitempty"` // 图片压缩程度,有效值50、70、100(带此参数时,zip_sign 参数需要一并带上) + ZipSign string `json:"zip_sign,omitempty"` // 未压缩原始图片文件真实md5(带此参数时,zip_quality 参数需要一并带上) + IsRevision int `json:"is_revision,omitempty"` // 是否需要多版本支持,1为支持,0为不支持,默认为0 + Mode int `json:"mode,omitempty"` // 上传方式,1手动、2批量上传、3文件自动备份、4相册自动备份、5视频自动备份 + ExifInfo string `json:"exif_info,omitempty"` // exif信息,json字符串,orientation、width、height、recovery为必传字段 +} + +// SliceUpCompleteResp 分片上传完成响应 +type SliceUpCompleteResp struct { + Errno int `json:"errno"` // 错误码 + FsID uint64 `json:"fs_id"` // 文件在云端的唯一标识ID + Md5 string `json:"md5,omitempty"` // 文件的MD5,只有提交文件时才返回,提交目录时没有该值 + ServerFilename string `json:"server_filename"` // 文件名 + Category int `json:"category"` // 分类类型, 1 视频 2 音频 3 图片 4 文档 5 应用 6 其他 7 种子 + Path string `json:"path"` // 上传后使用的文件绝对路径 + Size uint64 `json:"size"` // 文件大小,单位B + Ctime uint64 `json:"ctime"` // 文件创建时间 + Mtime uint64 `json:"mtime"` // 文件修改时间 + Isdir int `json:"isdir"` // 是否目录,0 文件、1 目录 +} diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go index 6a51d9b99..39b980259 100644 --- a/drivers/baidu_netdisk/util.go +++ b/drivers/baidu_netdisk/util.go @@ -2,6 +2,7 @@ package baidu_netdisk import ( "encoding/hex" + "encoding/json" "errors" "fmt" "net/http" @@ -306,6 +307,37 @@ func (d *BaiduNetdisk) create(path string, size int64, isdir int, uploadid, bloc return d.postForm("/xpan/file", params, form, resp) } +func (d *BaiduNetdisk) precreate(req *PrecreateReq) (*PrecreateResp, error) { + bl, err := json.Marshal(req.BlockList) + if err != nil { + log.Errorf("json.Marshal error: %v", err) + return nil, err + } + b := map[string]string{ 
+ "path": req.Path, + "size": strconv.Itoa(int(req.Size)), + "isdir": strconv.Itoa(req.Isdir), + "autoinit": strconv.Itoa(req.Autoinit), + "rtype": strconv.Itoa(req.Rtype), + "block_list": string(bl), + "content-md5": req.ContentMd5, + "slice-md5": req.SliceMd5, + } + + res := &PrecreateResp{} + r, err := d.request("https://pan.baidu.com/rest/2.0/xpan/file", http.MethodPost, func(rt *resty.Request) { + rt.SetQueryParam("method", "precreate"). + SetFormData(b) + + }, res) + if err != nil { + log.Errorf("baidu_netdisk precreate error: %s, %v", string(r), err) + return nil, err + } + return res, nil + +} + func joinTime(form map[string]string, ctime, mtime int64) { form["local_mtime"] = strconv.FormatInt(mtime, 10) form["local_ctime"] = strconv.FormatInt(ctime, 10) diff --git a/internal/conf/const.go b/internal/conf/const.go index fd0e1610d..9c883ed53 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -173,4 +173,5 @@ const ( UserAgentKey PathKey SharingIDKey + StorageKey ) diff --git a/internal/db/db.go b/internal/db/db.go index 96529c15d..8fc5149f5 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -5,6 +5,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "gorm.io/gorm" ) @@ -12,7 +13,7 @@ var db *gorm.DB func Init(d *gorm.DB) { db = d - err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB)) + err := AutoMigrate(new(model.Storage), new(model.User), new(model.Meta), new(model.SettingItem), new(model.SearchNode), new(model.TaskItem), new(model.SSHPublicKey), new(model.SharingDB), new(tables.SliceUpload)) if err != nil { log.Fatalf("failed migrate database: %s", err.Error()) } diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go new file mode 100644 index 
000000000..e914785b3 --- /dev/null +++ b/internal/db/slice_upload.go @@ -0,0 +1,19 @@ +package db + +import ( + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" + "github.com/pkg/errors" +) + +func CreateSliceUpload(su *tables.SliceUpload) error { + return errors.WithStack(db.Create(su).Error) +} + +func GetSliceUpload(wh map[string]any) (*tables.SliceUpload, error) { + su := &tables.SliceUpload{} + return su, db.Where(wh).First(su).Error +} + +func UpdateSliceUpload(su *tables.SliceUpload) error { + return errors.WithStack(db.Save(su).Error) +} diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 2884b5438..d6405fd93 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -2,8 +2,11 @@ package driver import ( "context" + "io" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" ) type Driver interface { @@ -81,6 +84,27 @@ type Remove interface { Remove(ctx context.Context, obj model.Obj) error } +// IUploadInfo 上传信息接口 +type IUploadInfo interface { + GetUploadInfo() *model.UploadInfo +} + +// IPreup 预上传接口 +type IPreup interface { + Preup(ctx context.Context, srcobj model.Obj, req *reqres.PreupReq) (*model.PreupInfo, error) +} + +// ISliceUpload 分片上传接口 +type ISliceUpload interface { + // SliceUpload 分片上传 + SliceUpload(ctx context.Context, req *tables.SliceUpload, sliceno uint, file io.Reader) error +} + +// IUploadSliceComplete 分片上传完成接口 +type IUploadSliceComplete interface { + UploadSliceComplete(ctx context.Context, req *tables.SliceUpload) error +} + type Put interface { // Put a file (provided as a FileStreamer) into the driver // Besides the most basic upload functionality, the following features also need to be implemented: diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 8c1f646b5..4ceb61550 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -2,15 +2,28 @@ package fs import ( 
"context" + "fmt" "io" + "mime/multipart" + "os" + "strings" + "sync" + "time" log "github.com/sirupsen/logrus" + "gorm.io/gorm" + "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/internal/db" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "github.com/OpenListTeam/OpenList/v4/internal/op" + "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/task" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/pkg/errors" ) @@ -188,3 +201,334 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error { } return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr) } + +/// 分片上传功能-------------------------------------------------------------------- + +// Preup 预上传 +func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { + wh := map[string]any{} + wh["dst_path"] = req.Path + wh["name"] = req.Name + wh["size"] = req.Size + if req.Hash.Md5 != "" { + wh["hash_md5"] = req.Hash.Md5 + } + if req.Hash.Sha1 != "" { + wh["hash_sha1"] = req.Hash.Sha1 + } + if req.Hash.Md5256KB != "" { + wh["hash_md5_256kb"] = req.Hash.Md5256KB + } + + su, err := db.GetSliceUpload(wh) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + log.Error("GetSliceUpload", err) + return nil, errors.WithStack(err) + } + + if su.ID != 0 { // 已存在 + return &reqres.PreupResp{ + UploadID: su.ID, + SliceSize: su.SliceSize, + SliceCnt: su.SliceCnt, + SliceUploadStatus: su.SliceUploadStatus, + }, nil + } + srcobj, err := op.Get(c, s, actualPath) + if err != nil { + log.Error(err) + return nil, errors.WithStack(err) + } + user, _ := c.Value(conf.UserKey).(*model.User) + + //不存在 + createsu := 
&tables.SliceUpload{ + DstPath: req.Path, + DstID: srcobj.GetID(), + Size: req.Size, + Name: req.Name, + HashMd5: req.Hash.Md5, + HashMd5256KB: req.Hash.Md5256KB, + HashSha1: req.Hash.Sha1, + Overwrite: req.Overwrite, + ActualPath: actualPath, + UserID: user.ID, + AsTask: req.AsTask, + } + log.Infof("storage mount path %s", s.GetStorage().MountPath) + switch st := s.(type) { + case driver.IPreup: + log.Info("preup support") + res, err := st.Preup(c, srcobj, req) + if err != nil { + log.Error("Preup error", req, err) + return nil, errors.WithStack(err) + } + log.Info("Preup success", res) + if res.Reuse { //秒传 + return &reqres.PreupResp{ + Reuse: true, + SliceCnt: 0, + SliceSize: res.SliceSize, + UploadID: 0, + }, nil + + } + createsu.PreupID = res.PreupID + createsu.SliceSize = res.SliceSize + createsu.Server = res.Server + default: + log.Info("Preup not support") + createsu.SliceSize = 10 * utils.MB + } + createsu.SliceCnt = uint((req.Size + createsu.SliceSize - 1) / createsu.SliceSize) + createsu.SliceUploadStatus = make([]byte, (createsu.SliceCnt+7)/8) + + err = db.CreateSliceUpload(createsu) + if err != nil { + log.Error("CreateSliceUpload error", createsu, err) + return nil, errors.WithStack(err) + } + return &reqres.PreupResp{ + Reuse: false, + SliceUploadStatus: createsu.SliceUploadStatus, + SliceSize: createsu.SliceSize, + SliceCnt: createsu.SliceCnt, + UploadID: createsu.ID, + }, nil + +} + +type sliceup struct { + *tables.SliceUpload + tmpFile *os.File + *sync.Mutex +} + +// 分片上传缓存 +var sliceupMap = sync.Map{} + +// UploadSlice 上传切片,第一个分片必须先上传 +func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { + var msu *sliceup + var err error + + sa, ok := sliceupMap.Load(req.UploadID) + if !ok { + su, e := db.GetSliceUpload(map[string]any{"id": req.UploadID}) + if e != nil { + log.Errorf("failed get slice upload [%d]: %+v", req.UploadID, e) + return e + } + msu = &sliceup{ + SliceUpload: su, + } + 
sliceupMap.Store(req.UploadID, msu) + } else { + msu = sa.(*sliceup) + } + defer func() { + if err != nil { + msu.Status = tables.SliceUploadStatusFailed + msu.Message = err.Error() + db.UpdateSliceUpload(msu.SliceUpload) + } + }() + + if req.SliceHash != "" { + sliceHash := []string{} // 分片hash + if tables.IsSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) { + log.Warnf("slice already uploaded,req:%+v", req) + return nil + } + + //验证分片hash值 + if req.SliceNum == 0 { //第一个分片,slicehash是所有的分片hash + hs := strings.Split(req.SliceHash, ",") + if len(hs) != int(msu.SliceCnt) { + msg := fmt.Sprintf("failed verify slice hash cnt req: %+v", req) + log.Error(msg) + return errors.New(msg) + } + // 更新分片hash + msu.SliceHash = req.SliceHash + if err := db.UpdateSliceUpload(msu.SliceUpload); err != nil { + log.Error("UpdateSliceUpload error", msu.SliceUpload, err) + return err + } + msu.Status = tables.SliceUploadStatusUploading + sliceHash = hs + } else { // 如果不是第一个分片,slicehash是当前分片hash + sliceHash = strings.Split(msu.SliceHash, ",") + if req.SliceHash != sliceHash[req.SliceNum] { //比对分片hash是否与之前上传的一致 + msg := fmt.Sprintf("failed verify slice hash,req: [%+v]", req) + log.Error(msg) + return errors.New(msg) + } + } + } + + switch s := storage.(type) { + case driver.ISliceUpload: + if err := s.SliceUpload(ctx, msu.SliceUpload, req.SliceNum, file); err != nil { + log.Error("SliceUpload error", req, err) + return err + } + + default: //其他网盘先缓存到本地 + if msu.TmpFile == "" { + tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + log.Error("CreateTemp error", req, err) + return err + } + abspath := tf.Name() //这里返回的是绝对路径 + err = os.Truncate(abspath, int64(msu.Size)) + if err != nil { + log.Error("Truncate error", req, err) + return err + } + msu.TmpFile = abspath + msu.tmpFile = tf + } + if msu.tmpFile == nil { + msu.tmpFile, err = os.OpenFile(msu.TmpFile, os.O_RDWR, 0644) + if err != nil { + log.Error("OpenFile error", req, msu.TmpFile, err) + return err + } + 
} + + content, err := io.ReadAll(file) //这里一次性读取全部,如果并发较多,可能会占用较多内存 + if err != nil { + log.Error("ReadAll error", req, err) + return err + } + _, err = msu.tmpFile.WriteAt(content, int64(req.SliceNum)*int64(msu.SliceSize)) + if err != nil { + log.Error("WriteAt error", req, err) + return err + } + } + tables.SetSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) + + err = db.UpdateSliceUpload(msu.SliceUpload) + if err != nil { + log.Error("UpdateSliceUpload error", msu.SliceUpload, err) + return err + } + return nil + +} + +// SliceUpComplete 完成分片上传 +func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) (*reqres.UploadSliceCompleteResp, error) { + var msu *sliceup + var err error + + sa, ok := sliceupMap.Load(uploadID) + if !ok { + su, err := db.GetSliceUpload(map[string]any{"id": uploadID}) + if err != nil { + log.Errorf("failed get slice upload [%d]: %+v", uploadID, err) + return nil, err + } + msu = &sliceup{ + SliceUpload: su, + } + + } else { + msu = sa.(*sliceup) + } + if !tables.IsAllSliceUploaded(msu.SliceUploadStatus, msu.SliceCnt) { + return &reqres.UploadSliceCompleteResp{ + Complete: 0, + SliceUploadStatus: msu.SliceUploadStatus, + UploadID: msu.ID, + }, nil + + } + + defer func() { + if err != nil { + msu.Status = tables.SliceUploadStatusFailed + msu.Message = err.Error() + db.UpdateSliceUpload(msu.SliceUpload) + } + if msu.tmpFile != nil { + msu.tmpFile.Close() + } + sliceupMap.Delete(msu.ID) + + }() + switch s := storage.(type) { + case driver.IUploadSliceComplete: + err = s.UploadSliceComplete(ctx, msu.SliceUpload) + if err != nil { + log.Error("UploadSliceComplete error", msu.SliceUpload, err) + return nil, err + } + msu.Status = tables.SliceUploadStatusComplete + db.UpdateSliceUpload(msu.SliceUpload) + rsp := &reqres.UploadSliceCompleteResp{ + Complete: 1, + UploadID: msu.ID, + } + // 清理缓存及临时文件 + if msu.tmpFile != nil { + msu.tmpFile.Close() + } + os.Remove(msu.TmpFile) + + return rsp, nil + + default: + 
//其他网盘客户端上传到本地后,上传到网盘,使用任务处理 + fd, err := os.Open(msu.TmpFile) + if err != nil { + log.Error("Open error", msu.TmpFile, err) + return nil, err + } + var hashInfo utils.HashInfo + if msu.HashMd5 != "" { + hashInfo = utils.NewHashInfo(utils.MD5, msu.HashMd5) + } + if msu.HashSha1 != "" { + hashInfo = utils.NewHashInfo(utils.SHA1, msu.HashSha1) + } + + file := &stream.FileStream{ + Obj: &model.Object{ + Name: msu.Name, + Size: msu.Size, + Modified: time.Now(), + HashInfo: hashInfo, + }, + Reader: fd, + Mimetype: "application/octet-stream", + WebPutAsTask: false, + } + if msu.AsTask { + _, err = putAsTask(ctx, msu.DstPath, file) + if err != nil { + log.Error("putAsTask error", msu.SliceUpload, err) + return nil, err + } + return &reqres.UploadSliceCompleteResp{ + Complete: 2, + UploadID: msu.ID, + }, nil + } + err = op.Put(ctx, storage, msu.ActualPath, file, nil) + if err != nil { + log.Error("Put error", msu.SliceUpload, err) + return nil, err + } + return &reqres.UploadSliceCompleteResp{ + Complete: 1, + UploadID: msu.ID, + }, nil + + } + +} diff --git a/internal/model/obj.go b/internal/model/obj.go index 836904fce..750dc2698 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -227,3 +227,9 @@ func (om *ObjMerge) InitHideReg(hides string) { func (om *ObjMerge) Reset() { om.set.Clear() } + +type Hash struct { + Md5 string `json:"md5"` + Md5256KB string `json:"md5_256kb"` + Sha1 string `json:"sha1"` +} diff --git a/internal/model/reqres/upload.go b/internal/model/reqres/upload.go new file mode 100644 index 000000000..51f917669 --- /dev/null +++ b/internal/model/reqres/upload.go @@ -0,0 +1,41 @@ +package reqres + +import "github.com/OpenListTeam/OpenList/v4/internal/model" + +// PreupReq 预上传请求 +type PreupReq struct { + Path string `json:"path"` // 上传到的挂载路径 + Name string `json:"name"` + Size int64 `json:"size"` + Hash model.Hash `json:"hash"` + Overwrite bool `json:"overwrite"` // 是否覆盖同名文件 + AsTask bool `json:"as_task"` +} + +// PreupResp 预上传响应 +type 
PreupResp struct { + UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + SliceSize int64 `json:"slice_size"` //分片大小,单位:字节 + SliceCnt uint `json:"slice_cnt"` // 分片数量 + SliceUploadStatus []byte `json:"slice_upload_status"` // 分片上传状态 + Reuse bool `json:"reuse"` //是否秒传 +} + +// UploadSliceReq 上传分片请求 +type UploadSliceReq struct { + UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + SliceHash string `json:"slice_hash"` // 分片hash,如果是第一个分片,则需包含所有分片hash,用","分割 + SliceNum uint `json:"slice_num"` // 分片序号 +} + +// UploadSliceCompleteReq 分片上传完成请求 +type UploadSliceCompleteReq struct { + UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id +} + +// UploadSliceCompleteResp 分片上传完成响应 +type UploadSliceCompleteResp struct { + UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + SliceUploadStatus []byte `json:"slice_upload_status"` // 分片上传状态 + Complete uint `json:"complete"` //完成状态 0 未完成,分片缺失 1 完成 2 成功上传到代理服务 +} diff --git a/internal/model/tables/base.go b/internal/model/tables/base.go new file mode 100644 index 000000000..4debf2285 --- /dev/null +++ b/internal/model/tables/base.go @@ -0,0 +1,10 @@ +package tables + +import "time" + +// Base 表基础字段 +type Base struct { + ID uint `json:"id" gorm:"primaryKey;autoIncrement"` + CreatedAt time.Time `json:"created_at" gorm:"autoCreateTime"` + UpdatedAt time.Time `json:"updated_at" gorm:"autoUpdateTime"` +} diff --git a/internal/model/tables/slice_upload.go b/internal/model/tables/slice_upload.go new file mode 100644 index 000000000..72d18b213 --- /dev/null +++ b/internal/model/tables/slice_upload.go @@ -0,0 +1,61 @@ +package tables + +const ( + //SliceUploadStatusWaiting 等待上传 + SliceUploadStatusWaiting = iota + // SliceUploadStatusUploading 正在上传 + SliceUploadStatusUploading + // SliceUploadStatusCancelled 取消上传 + SliceUploadStatusCancelled + // SliceUploadStatusComplete 上传完成 + SliceUploadStatusComplete + // SliceUploadStatusFailed 上传失败 + SliceUploadStatusFailed + // SliceUploadStatusProxyComplete 成功上传到代理服务,等待上传到网盘 + 
SliceUploadStatusProxyComplete +) + +// SliceUpload 分片上传数据表 +type SliceUpload struct { + Base + PreupID string `json:"preup_id"` // 网盘返回的预上传id + SliceSize int64 `json:"slice_size"` // 分片大小,单位:字节 + DstID string `json:"dst_id"` // 目标文件夹ID,部分网盘需要 + DstPath string `json:"dst_path"` // 挂载的父文件夹路径 + ActualPath string `json:"actual_path"` //网盘真实父文件夹路径,不同的网盘,这个值可能相同,比如有相同的目录的两个网盘 + Name string `json:"name"` // 文件名 + Size int64 `json:"size"` // 文件大小 + TmpFile string `json:"tmp_file"` //不支持分片上传的文件临时文件路径 + HashMd5 string `json:"hash_md5"` // md5 + HashMd5256KB string `json:"hash_md5_256kb" gorm:"column:hash_md5_256kb;type:varchar(32)"` // md5256KB + HashSha1 string `json:"hash_sha1"` // sha1 + SliceHash string `json:"slice_hash"` // 分片hash + SliceCnt uint `json:"slice_cnt"` // 分片数量 + SliceUploadStatus []byte `json:"slice_upload_status"` //分片上传状态,对应位置1表示分片已上传 + Server string `json:"server"` // 上传服务器 + Status int `json:"status"` //上传状态 + Message string `json:"message"` // 失败错误信息 + Overwrite bool `json:"overwrite"` // 是否覆盖同名文件 + UserID uint `json:"user_id"` //用户id + AsTask bool `json:"as_task"` +} + +// IsSliceUploaded 判断第i个分片是否已上传 +func IsSliceUploaded(status []byte, i int) bool { + return status[i/8]&(1<<(i%8)) != 0 +} + +// SetSliceUploaded 标记第i个分片已上传 +func SetSliceUploaded(status []byte, i int) { + status[i/8] |= 1 << (i % 8) +} + +// IsAllSliceUploaded 是否全部上传完成 +func IsAllSliceUploaded(status []byte, sliceCnt uint) bool { + for i := range sliceCnt { + if status[i/8]&(1<<(i%8)) == 0 { + return false + } + } + return true +} diff --git a/internal/model/upload.go b/internal/model/upload.go new file mode 100644 index 000000000..5911ff74d --- /dev/null +++ b/internal/model/upload.go @@ -0,0 +1,17 @@ +package model + +// UploadInfo 上传所需信息 +type UploadInfo struct { + SliceHashNeed bool `json:"slice_hash_need"` //是否需要分片哈希 + HashMd5Need bool `json:"hash_md5_need"` //是否需要md5 + HashMd5256KBNeed bool `json:"hash_md5_256kb_need"` //是否需要前256KB的md5 + HashSha1Need bool 
`json:"hash_sha1_need"` //是否需要sha1 +} + +// PreupInfo 预上传信息 +type PreupInfo struct { + PreupID string `json:"preup_id"` //预上传id,由网盘返回 + SliceSize int64 `json:"slice_size"` //分片大小 + Server string `json:"server"` //上传服务器地址 + Reuse bool `json:"reuse"` //是否秒传 +} diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 087a58a9a..d444923a2 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -8,8 +8,10 @@ import ( "time" "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/fs" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/task" "github.com/OpenListTeam/OpenList/v4/pkg/utils" @@ -203,3 +205,101 @@ func FsForm(c *gin.Context) { "task": getTaskInfo(t), }) } + +// 分片上传流程如下 +// 1. 客户端调用FsUpHash获取上传所需的信息(目前主要是hash信息) +// 2. 根据获取到的hash信息,客户端调用FsPreup上传必要的参数,获取分片大小,及需上传的分片列表 +// 3. 客户端根据分片列表进行分片上传,如果分片是第一个,且需要sliceHash,那么需要把所有分片的hash带上 +// 4. 
如果中途出现问题,可以重新进行分片上传流程,后端根据记录的信息进行恢复 +// 如果网盘不支持分片上传,则会进行本地中转,对客户端来说,仍然是分片上传 + +// FsUpInfo 获取上传所需的信息 +func FsUpInfo(c *gin.Context) { + storage := c.Request.Context().Value(conf.StorageKey) + + uh := &model.UploadInfo{ + SliceHashNeed: false, + HashMd5Need: true, + } + switch s := storage.(type) { + case driver.IUploadInfo: + uh = s.GetUploadInfo() + } + common.SuccessResp(c, uh) +} + +// FsPreup 预上传 +func FsPreup(c *gin.Context) { + req := &reqres.PreupReq{} + err := c.ShouldBindJSON(req) + if err != nil { + common.ErrorResp(c, err, 400) + return + } + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) + path := c.Request.Context().Value(conf.PathKey).(string) + + res, err := fs.Preup(c.Request.Context(), storage, path, req) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c, res) +} + +// FsUpSlice 上传分片 +func FsUpSlice(c *gin.Context) { + req := &reqres.UploadSliceReq{} + req.SliceHash = c.PostForm("slice_hash") + sn, err := strconv.ParseUint(c.PostForm("slice_num"), 10, 32) + if err != nil { + common.ErrorResp(c, err, 400) + return + } + req.SliceNum = uint(sn) + upid, err := strconv.ParseUint(c.PostForm("upload_id"), 10, 64) + if err != nil { + common.ErrorResp(c, err, 400) + return + } + req.UploadID = uint(upid) + + file, err := c.FormFile("slice") + if err != nil { + common.ErrorResp(c, err, 400) + return + } + fd, err := file.Open() + if err != nil { + common.ErrorResp(c, err, 500) + return + } + defer fd.Close() + + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) + + err = fs.UploadSlice(c.Request.Context(), storage, req, fd) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c) +} + +// FsUpSliceComplete 上传分片完成 +func FsUpSliceComplete(c *gin.Context) { + req := &reqres.UploadSliceCompleteReq{} + err := c.ShouldBindJSON(req) + if err != nil { + common.ErrorResp(c, err, 400) + return + } + storage := 
c.Request.Context().Value(conf.StorageKey).(driver.Driver) + rsp, err := fs.SliceUpComplete(c.Request.Context(), storage, req.UploadID) + if err != nil { + common.ErrorResp(c, err, 500) + return + } + common.SuccessResp(c, rsp) + +} diff --git a/server/middlewares/fs.go b/server/middlewares/fs.go new file mode 100644 index 000000000..7556219dc --- /dev/null +++ b/server/middlewares/fs.go @@ -0,0 +1,94 @@ +package middlewares + +import ( + "net/url" + stdpath "path" + + "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/op" + "github.com/OpenListTeam/OpenList/v4/server/common" + "github.com/gin-gonic/gin" + "github.com/pkg/errors" +) + +// 文件操作相关中间件 +// 把文件路径校验及鉴权统一放在这里处理,并将处理结果放到上下文里,避免后续重复处理 +// // Middleware for file operations +// Centralizes file path validation and authentication here, and stores the results in the context +// to avoid redundant processing in subsequent steps + +type permissionFunc func(user *model.User, meta *model.Meta, path string, password string) bool + +func FsUp(c *gin.Context) { + fs(c, false, func(user *model.User, meta *model.Meta, path string, password string) bool { + return common.CanAccess(user, meta, path, password) && (user.CanWrite() || common.CanWrite(meta, stdpath.Dir(path))) + }) +} +func FsRename(c *gin.Context) { + fs(c, true, func(user *model.User, meta *model.Meta, path string, password string) bool { + return user.CanRename() + }) +} +func FsRemove(c *gin.Context) { + fs(c, true, func(user *model.User, meta *model.Meta, path string, password string) bool { + return user.CanRemove() + }) +} +func FsSliceUp(c *gin.Context) { + fs(c, true, func(user *model.User, meta *model.Meta, path string, password string) bool { + return common.CanAccess(user, meta, path, password) && (user.CanWrite() || common.CanWrite(meta, stdpath.Dir(path))) + }) +} + +func fs(c *gin.Context, 
withstorage bool, permission permissionFunc) { + path := c.GetHeader("File-Path") + password := c.GetHeader("Password") + path, err := url.PathUnescape(path) + if err != nil { + common.ErrorResp(c, err, 400) + c.Abort() + return + } + user := c.Request.Context().Value(conf.UserKey).(*model.User) + path, err = user.JoinPath(path) + if err != nil { + common.ErrorResp(c, err, 403) + c.Abort() + return + } + meta, err := op.GetNearestMeta(stdpath.Dir(path)) + if err != nil { + if !errors.Is(errors.Cause(err), errs.MetaNotFound) { + common.ErrorResp(c, err, 500, true) + c.Abort() + return + } + } + + if !permission(user, meta, path, password) { + common.ErrorResp(c, errs.PermissionDenied, 403) + c.Abort() + return + } + + if withstorage { + storage, actualPath, err := op.GetStorageAndActualPath(path) + if err != nil { + common.ErrorResp(c, err, 400) + c.Abort() + return + } + if storage.Config().NoUpload { + common.ErrorStrResp(c, "Current storage doesn't support upload", 403) + c.Abort() + return + } + common.GinWithValue(c, conf.StorageKey, storage) + common.GinWithValue(c, conf.PathKey, actualPath) //这里的路径已经是网盘真实路径了 + + } + + c.Next() +} diff --git a/server/middlewares/fsup.go b/server/middlewares/fsup.go deleted file mode 100644 index 08b160ee5..000000000 --- a/server/middlewares/fsup.go +++ /dev/null @@ -1,45 +0,0 @@ -package middlewares - -import ( - "net/url" - stdpath "path" - - "github.com/OpenListTeam/OpenList/v4/internal/conf" - "github.com/OpenListTeam/OpenList/v4/internal/errs" - "github.com/OpenListTeam/OpenList/v4/internal/model" - "github.com/OpenListTeam/OpenList/v4/internal/op" - "github.com/OpenListTeam/OpenList/v4/server/common" - "github.com/gin-gonic/gin" - "github.com/pkg/errors" -) - -func FsUp(c *gin.Context) { - path := c.GetHeader("File-Path") - password := c.GetHeader("Password") - path, err := url.PathUnescape(path) - if err != nil { - common.ErrorResp(c, err, 400) - c.Abort() - return - } - user := 
c.Request.Context().Value(conf.UserKey).(*model.User) - path, err = user.JoinPath(path) - if err != nil { - common.ErrorResp(c, err, 403) - return - } - meta, err := op.GetNearestMeta(stdpath.Dir(path)) - if err != nil { - if !errors.Is(errors.Cause(err), errs.MetaNotFound) { - common.ErrorResp(c, err, 500, true) - c.Abort() - return - } - } - if !(common.CanAccess(user, meta, path, password) && (user.CanWrite() || common.CanWrite(meta, stdpath.Dir(path)))) { - common.ErrorResp(c, errs.PermissionDenied, 403) - c.Abort() - return - } - c.Next() -} diff --git a/server/router.go b/server/router.go index 66f0539ba..754ef2ce3 100644 --- a/server/router.go +++ b/server/router.go @@ -205,6 +205,12 @@ func _fs(g *gin.RouterGroup) { g.PUT("/put", middlewares.FsUp, uploadLimiter, handles.FsStream) g.PUT("/form", middlewares.FsUp, uploadLimiter, handles.FsForm) g.POST("/link", middlewares.AuthAdmin, handles.Link) + + g.GET("/upload/info", middlewares.FsSliceUp, handles.FsUpInfo) + g.POST("/preup", middlewares.FsSliceUp, handles.FsPreup) + g.POST("/slice_upload", middlewares.FsSliceUp, handles.FsUpSlice) + g.POST("/slice_upload_complete", middlewares.FsSliceUp, handles.FsUpSliceComplete) + // g.POST("/add_aria2", handles.AddOfflineDownload) // g.POST("/add_qbit", handles.AddQbittorrent) // g.POST("/add_transmission", handles.SetTransmission) From f4056db626a20674411fa7c11f5e8ff657896013 Mon Sep 17 00:00:00 2001 From: jenken827 Date: Tue, 26 Aug 2025 12:13:53 +0800 Subject: [PATCH 02/26] feat(fs): implement slice upload --- internal/fs/fs.go | 46 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 11 deletions(-) diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 4ceb61550..7884881b9 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -302,12 +302,27 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr type sliceup struct { *tables.SliceUpload tmpFile *os.File - *sync.Mutex + sync.Mutex } // 分片上传缓存 var 
sliceupMap = sync.Map{} +type sliceWriter struct { + file *os.File + offset int64 +} + +// Write implements io.Writer interface +// 虽然每个分片都定义了一个sliceWriter +// 但是Write方法会在同一个分片复制过程中多次调用, +// 所以要更新自身的offset +func (sw *sliceWriter) Write(p []byte) (int, error) { + n, err := sw.file.WriteAt(p, sw.offset) + sw.offset += int64(n) + return n, err +} + // UploadSlice 上传切片,第一个分片必须先上传 func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { var msu *sliceup @@ -335,12 +350,14 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS } }() + // 检查分片是否已上传过 + if tables.IsSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) { + log.Warnf("slice already uploaded,req:%+v", req) + return nil + } + if req.SliceHash != "" { sliceHash := []string{} // 分片hash - if tables.IsSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) { - log.Warnf("slice already uploaded,req:%+v", req) - return nil - } //验证分片hash值 if req.SliceNum == 0 { //第一个分片,slicehash是所有的分片hash @@ -376,15 +393,18 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS } default: //其他网盘先缓存到本地 + msu.Lock() if msu.TmpFile == "" { tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") if err != nil { + msu.Unlock() log.Error("CreateTemp error", req, err) return err } abspath := tf.Name() //这里返回的是绝对路径 err = os.Truncate(abspath, int64(msu.Size)) if err != nil { + msu.Unlock() log.Error("Truncate error", req, err) return err } @@ -394,19 +414,22 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS if msu.tmpFile == nil { msu.tmpFile, err = os.OpenFile(msu.TmpFile, os.O_RDWR, 0644) if err != nil { + msu.Unlock() log.Error("OpenFile error", req, msu.TmpFile, err) return err } } + msu.Unlock() - content, err := io.ReadAll(file) //这里一次性读取全部,如果并发较多,可能会占用较多内存 - if err != nil { - log.Error("ReadAll error", req, err) - return err + // 流式复制,减少内存占用 + sw := &sliceWriter{ + file: msu.tmpFile, + 
offset: int64(req.SliceNum) * int64(msu.SliceSize), } - _, err = msu.tmpFile.WriteAt(content, int64(req.SliceNum)*int64(msu.SliceSize)) + _, err := io.Copy(sw, file) + if err != nil { - log.Error("WriteAt error", req, err) + log.Error("Copy error", req, err) return err } } @@ -524,6 +547,7 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) log.Error("Put error", msu.SliceUpload, err) return nil, err } + os.Remove(msu.TmpFile) return &reqres.UploadSliceCompleteResp{ Complete: 1, UploadID: msu.ID, From 50b55ddf15aeeeca81efc9f9c19b73fb315af3ea Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 12:58:08 +0800 Subject: [PATCH 03/26] fix(fs): Fix concurrent race conditions, optimize segment upload status updates and temporary file processing --- drivers/123_open/driver.go | 9 +++++--- drivers/baidu_netdisk/driver.go | 4 ++-- internal/fs/fs.go | 41 ++++++++++++++++++++++++++------- 3 files changed, 41 insertions(+), 13 deletions(-) diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go index 9af7da8b4..c6e9cb77a 100644 --- a/drivers/123_open/driver.go +++ b/drivers/123_open/driver.go @@ -225,7 +225,7 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre } // Preup 预上传 -func (d *Open123) Preup(c context.Context, srcobj model.Obj, req *reqres.PreupReq) (*model.PreupInfo, error) { +func (d *Open123) Preup(ctx context.Context, srcobj model.Obj, req *reqres.PreupReq) (*model.PreupInfo, error) { pid, err := strconv.ParseUint(srcobj.GetID(), 10, 64) if err != nil { return nil, err @@ -256,8 +256,11 @@ func (d *Open123) Preup(c context.Context, srcobj model.Obj, req *reqres.PreupRe } // UploadSlice 上传分片 -func (d *Open123) SliceUpload(c context.Context, req *tables.SliceUpload, sliceno uint, fd io.Reader) error { +func (d *Open123) SliceUpload(ctx context.Context, req *tables.SliceUpload, sliceno uint, fd io.Reader) error { sh := strings.Split(req.SliceHash, ",") + if int(sliceno) >= len(sh) { 
+ return fmt.Errorf("slice number %d out of range, total slices: %d", sliceno, len(sh)) + } r := &UploadSliceReq{ Name: req.Name, PreuploadID: req.PreupID, @@ -270,7 +273,7 @@ func (d *Open123) SliceUpload(c context.Context, req *tables.SliceUpload, slicen } // UploadSliceComplete 分片上传完成 -func (d *Open123) UploadSliceComplete(c context.Context, su *tables.SliceUpload) error { +func (d *Open123) UploadSliceComplete(ctx context.Context, su *tables.SliceUpload) error { return d.sliceUpComplete(su.PreupID) } diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 0c60cebbb..4589f4e2d 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -18,7 +18,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/driver" - "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/ err := d.uploadSlice(ctx, map[string]string{4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" "github.com/OpenListTeam/OpenList/v4/internal/model/tables" @@ -378,7 +378,7 @@ func (d *BaiduNetdisk) uploadSlice(ctx context.Context, params map[string]string } // SliceUpload 上传分片 -func (d *BaiduNetdisk) SliceUpload(c context.Context, req *tables.SliceUpload, sliceno uint, fd io.Reader) error { +func (d *BaiduNetdisk) SliceUpload(ctx context.Context, req *tables.SliceUpload, sliceno uint, fd io.Reader) error { fp := filepath.Join(req.DstPath, req.Name) if sliceno == 0 { //第一个分片需要先执行预上传 rtype := 1 diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 7884881b9..87f8fbd04 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -328,11 +328,14 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS var msu *sliceup var err error - sa, ok := sliceupMap.Load(req.UploadID) - if !ok { + // 使用 LoadOrStore 
避免并发竞态条件 + sa, loaded := sliceupMap.LoadOrStore(req.UploadID, nil) + if !loaded { + // 首次加载,需要从数据库获取 su, e := db.GetSliceUpload(map[string]any{"id": req.UploadID}) if e != nil { log.Errorf("failed get slice upload [%d]: %+v", req.UploadID, e) + sliceupMap.Delete(req.UploadID) // 清理无效的 key return e } msu = &sliceup{ @@ -346,7 +349,9 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS if err != nil { msu.Status = tables.SliceUploadStatusFailed msu.Message = err.Error() - db.UpdateSliceUpload(msu.SliceUpload) + if updateErr := db.UpdateSliceUpload(msu.SliceUpload); updateErr != nil { + log.Errorf("Failed to update slice upload status: %v", updateErr) + } } }() @@ -476,10 +481,20 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) if err != nil { msu.Status = tables.SliceUploadStatusFailed msu.Message = err.Error() - db.UpdateSliceUpload(msu.SliceUpload) + if updateErr := db.UpdateSliceUpload(msu.SliceUpload); updateErr != nil { + log.Errorf("Failed to update slice upload status: %v", updateErr) + } } + // 确保资源清理 if msu.tmpFile != nil { - msu.tmpFile.Close() + if closeErr := msu.tmpFile.Close(); closeErr != nil { + log.Errorf("Failed to close tmp file: %v", closeErr) + } + } + if msu.TmpFile != "" { + if removeErr := os.Remove(msu.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { + log.Errorf("Failed to remove tmp file %s: %v", msu.TmpFile, removeErr) + } } sliceupMap.Delete(msu.ID) @@ -499,9 +514,15 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) } // 清理缓存及临时文件 if msu.tmpFile != nil { - msu.tmpFile.Close() + if closeErr := msu.tmpFile.Close(); closeErr != nil { + log.Errorf("Failed to close tmp file: %v", closeErr) + } + } + if msu.TmpFile != "" { + if removeErr := os.Remove(msu.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { + log.Errorf("Failed to remove tmp file %s: %v", msu.TmpFile, removeErr) + } } - os.Remove(msu.TmpFile) return rsp, nil @@ -547,7 
+568,11 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) log.Error("Put error", msu.SliceUpload, err) return nil, err } - os.Remove(msu.TmpFile) + if msu.TmpFile != "" { + if removeErr := os.Remove(msu.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { + log.Errorf("Failed to remove tmp file %s: %v", msu.TmpFile, removeErr) + } + } return &reqres.UploadSliceCompleteResp{ Complete: 1, UploadID: msu.ID, From 6c32b69353942676b6fdebab18fbad418041bfaf Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 13:03:26 +0800 Subject: [PATCH 04/26] fix(fs): Fix import path errors and unify context parameters --- drivers/baidu_netdisk/driver.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 4589f4e2d..18f1392af 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -18,7 +18,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/drivers/base" "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/driver" - "github.com/OpenListTeam/OpenList/ err := d.uploadSlice(ctx, map[string]string{4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" "github.com/OpenListTeam/OpenList/v4/internal/model/tables" @@ -400,7 +400,7 @@ func (d *BaiduNetdisk) SliceUpload(ctx context.Context, req *tables.SliceUpload, } req.PreupID = precreateResp.Uploadid } - err := d.uploadSlice(c, map[string]string{ + err := d.uploadSlice(ctx, map[string]string{ "method": "upload", "access_token": d.AccessToken, "type": "tmpfile", From c37659e9089dcc36a56c0732d261405f451e2d43 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Fri, 5 Sep 2025 16:14:33 +0800 Subject: [PATCH 05/26] =?UTF-8?q?fix(fs):=20=E7=A7=BB=E9=99=A4=E9=87=8D?= 
=?UTF-8?q?=E5=A4=8D=E7=9A=84=E6=B8=85=E7=90=86=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/fs/fs.go | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 87f8fbd04..f989e6a5d 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -512,18 +512,6 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) Complete: 1, UploadID: msu.ID, } - // 清理缓存及临时文件 - if msu.tmpFile != nil { - if closeErr := msu.tmpFile.Close(); closeErr != nil { - log.Errorf("Failed to close tmp file: %v", closeErr) - } - } - if msu.TmpFile != "" { - if removeErr := os.Remove(msu.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { - log.Errorf("Failed to remove tmp file %s: %v", msu.TmpFile, removeErr) - } - } - return rsp, nil default: @@ -568,11 +556,6 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) log.Error("Put error", msu.SliceUpload, err) return nil, err } - if msu.TmpFile != "" { - if removeErr := os.Remove(msu.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { - log.Errorf("Failed to remove tmp file %s: %v", msu.TmpFile, removeErr) - } - } return &reqres.UploadSliceCompleteResp{ Complete: 1, UploadID: msu.ID, From 507b19a8b268234c20793905f766ce35251689b9 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Fri, 5 Sep 2025 16:17:21 +0800 Subject: [PATCH 06/26] =?UTF-8?q?=E4=BF=AE=E5=A4=8D=E4=B8=8A=E4=BC=A0?= =?UTF-8?q?=E4=BB=BB=E5=8A=A1=E7=9A=84=E6=96=87=E4=BB=B6=E8=A2=AB=E6=8F=90?= =?UTF-8?q?=E5=89=8D=E6=B8=85=E7=90=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/fs/fs.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/internal/fs/fs.go b/internal/fs/fs.go index f989e6a5d..0b10158ba 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -431,7 +431,7 @@ func UploadSlice(ctx 
context.Context, storage driver.Driver, req *reqres.UploadS file: msu.tmpFile, offset: int64(req.SliceNum) * int64(msu.SliceSize), } - _, err := io.Copy(sw, file) + _, err := utils.CopyWithBuffer(sw, file) if err != nil { log.Error("Copy error", req, err) @@ -516,9 +516,9 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) default: //其他网盘客户端上传到本地后,上传到网盘,使用任务处理 - fd, err := os.Open(msu.TmpFile) - if err != nil { - log.Error("Open error", msu.TmpFile, err) + if msu.tmpFile == nil { + err := fmt.Errorf("tmp file not found [%d]", uploadID) + log.Error(err) return nil, err } var hashInfo utils.HashInfo @@ -536,11 +536,13 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) Modified: time.Now(), HashInfo: hashInfo, }, - Reader: fd, - Mimetype: "application/octet-stream", - WebPutAsTask: false, } + file.Mimetype = utils.GetMimeType(msu.Name) if msu.AsTask { + file.SetTmpFile(msu.tmpFile) + // 置空,避免defer中被清理 + msu.tmpFile = nil + msu.TmpFile = "" _, err = putAsTask(ctx, msu.DstPath, file) if err != nil { log.Error("putAsTask error", msu.SliceUpload, err) @@ -551,6 +553,7 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) UploadID: msu.ID, }, nil } + file.Reader = msu.tmpFile err = op.Put(ctx, storage, msu.ActualPath, file, nil) if err != nil { log.Error("Put error", msu.SliceUpload, err) @@ -560,7 +563,6 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) Complete: 1, UploadID: msu.ID, }, nil - } } From 6fcb63c6acfda52a0acf35b161a7b4bd26abe4a2 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 20:30:05 +0800 Subject: [PATCH 07/26] fix(upload): Added task ID support, optimized segment upload logic and error handling --- drivers/123_open/driver.go | 25 ++- drivers/baidu_netdisk/driver.go | 38 +++- internal/db/slice_upload.go | 41 +++++ internal/fs/fs.go | 251 +++++++++++++++++--------- internal/model/reqres/upload.go | 8 +- 
internal/model/tables/slice_upload.go | 1 + server/handles/fsup.go | 55 ++++-- 7 files changed, 307 insertions(+), 112 deletions(-) diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go index c2844a9aa..260c3a776 100644 --- a/drivers/123_open/driver.go +++ b/drivers/123_open/driver.go @@ -262,6 +262,15 @@ func (d *Open123) SliceUpload(ctx context.Context, req *tables.SliceUpload, slic if int(sliceno) >= len(sh) { return fmt.Errorf("slice number %d out of range, total slices: %d", sliceno, len(sh)) } + + if req.PreupID == "" { + return fmt.Errorf("preupload ID is empty for slice %d", sliceno) + } + + if req.Server == "" { + return fmt.Errorf("upload server is empty for slice %d", sliceno) + } + r := &UploadSliceReq{ Name: req.Name, PreuploadID: req.PreupID, @@ -270,13 +279,23 @@ func (d *Open123) SliceUpload(ctx context.Context, req *tables.SliceUpload, slic SliceMD5: sh[sliceno], SliceNo: int(sliceno) + 1, } - return d.uploadSlice(r) + + if err := d.uploadSlice(r); err != nil { + return fmt.Errorf("upload slice %d failed: %w", sliceno, err) + } + return nil } // UploadSliceComplete 分片上传完成 func (d *Open123) UploadSliceComplete(ctx context.Context, su *tables.SliceUpload) error { - - return d.sliceUpComplete(su.PreupID) + if su.PreupID == "" { + return fmt.Errorf("preupload ID is empty") + } + + if err := d.sliceUpComplete(su.PreupID); err != nil { + return fmt.Errorf("slice upload complete failed: %w", err) + } + return nil } var _ driver.Driver = (*Open123)(nil) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 18f1392af..474523959 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -396,10 +396,18 @@ func (d *BaiduNetdisk) SliceUpload(ctx context.Context, req *tables.SliceUpload, SliceMd5: req.HashMd5256KB, }) if err != nil { - return err + return fmt.Errorf("precreate failed: %w", err) + } + if precreateResp == nil { + return fmt.Errorf("precreate returned nil response") } 
req.PreupID = precreateResp.Uploadid } + + if req.PreupID == "" { + return fmt.Errorf("preupload ID is empty for slice %d", sliceno) + } + err := d.uploadSlice(ctx, map[string]string{ "method": "upload", "access_token": d.AccessToken, @@ -408,8 +416,11 @@ func (d *BaiduNetdisk) SliceUpload(ctx context.Context, req *tables.SliceUpload, "uploadid": req.PreupID, "partseq": strconv.Itoa(int(sliceno)), }, req.Name, fd) - return err - + + if err != nil { + return fmt.Errorf("upload slice %d failed: %w", sliceno, err) + } + return nil } // Preup 预上传(自定以接口,为了适配自定义的分片上传) @@ -424,15 +435,28 @@ func (d *BaiduNetdisk) UploadSliceComplete(ctx context.Context, su *tables.Slice fp := filepath.Join(su.DstPath, su.Name) rsp := &SliceUpCompleteResp{} t := time.Now().Unix() - sh, err := json.Marshal(strings.Split(su.SliceHash, ",")) + + sliceHashList := strings.Split(su.SliceHash, ",") + if len(sliceHashList) == 0 { + return fmt.Errorf("slice hash list is empty") + } + + sh, err := json.Marshal(sliceHashList) if err != nil { - return err + return fmt.Errorf("failed to marshal slice hash: %w", err) } + b, err := d.create(fp, int64(su.Size), 0, su.PreupID, string(sh), rsp, t, t) if err != nil { - log.Error(err, rsp, string(b)) + log.Errorf("create file failed: %v, response: %v, body: %s", err, rsp, string(b)) + return fmt.Errorf("create file failed: %w", err) } - return err + + if rsp.Errno != 0 { + return fmt.Errorf("baidu response error: errno=%d", rsp.Errno) + } + + return nil } var _ driver.Driver = (*BaiduNetdisk)(nil) diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go index e914785b3..f57895ae1 100644 --- a/internal/db/slice_upload.go +++ b/internal/db/slice_upload.go @@ -3,6 +3,7 @@ package db import ( "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "github.com/pkg/errors" + "gorm.io/gorm" ) func CreateSliceUpload(su *tables.SliceUpload) error { @@ -14,6 +15,46 @@ func GetSliceUpload(wh map[string]any) (*tables.SliceUpload, error) { return su, 
db.Where(wh).First(su).Error } +// GetSliceUploadByTaskID 通过TaskID获取分片上传记录 +func GetSliceUploadByTaskID(taskID string) (*tables.SliceUpload, error) { + su := &tables.SliceUpload{} + return su, db.Where("task_id = ?", taskID).First(su).Error +} + func UpdateSliceUpload(su *tables.SliceUpload) error { return errors.WithStack(db.Save(su).Error) } + +// DeleteSliceUpload 删除分片上传记录 +func DeleteSliceUpload(id uint) error { + return errors.WithStack(db.Delete(&tables.SliceUpload{}, id).Error) +} + +// DeleteSliceUploadByTaskID 通过TaskID删除分片上传记录 +func DeleteSliceUploadByTaskID(taskID string) error { + return errors.WithStack(db.Where("task_id = ?", taskID).Delete(&tables.SliceUpload{}).Error) +} + +// UpdateSliceUploadWithTx 使用事务更新分片上传状态,确保数据一致性 +func UpdateSliceUploadWithTx(su *tables.SliceUpload) error { + return errors.WithStack(db.Transaction(func(tx *gorm.DB) error { + return tx.Save(su).Error + })) +} + +// UpdateSliceStatusAtomic 原子性地更新分片状态 +func UpdateSliceStatusAtomic(taskID string, sliceNum int, status []byte) error { + return errors.WithStack(db.Transaction(func(tx *gorm.DB) error { + // 先读取当前状态 + var su tables.SliceUpload + if err := tx.Where("task_id = ?", taskID).First(&su).Error; err != nil { + return err + } + + // 更新分片状态 + tables.SetSliceUploaded(su.SliceUploadStatus, sliceNum) + + // 保存更新 + return tx.Save(&su).Error + })) +} diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 0b10158ba..871829fe1 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -25,6 +25,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/task" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/pkg/errors" + "github.com/google/uuid" ) // the param named path of functions in this package is a mount path @@ -206,10 +207,12 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error { // Preup 预上传 func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { + // 检查是否存在未完成的上传任务(用于断点续传) wh := 
map[string]any{} wh["dst_path"] = req.Path wh["name"] = req.Name wh["size"] = req.Size + wh["status"] = tables.SliceUploadStatusUploading // 只查找正在进行中的任务 if req.Hash.Md5 != "" { wh["hash_md5"] = req.Hash.Md5 } @@ -226,14 +229,15 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr return nil, errors.WithStack(err) } - if su.ID != 0 { // 已存在 + if su.ID != 0 { // 找到未完成的上传任务,支持断点续传 return &reqres.PreupResp{ - UploadID: su.ID, + TaskID: su.TaskID, SliceSize: su.SliceSize, SliceCnt: su.SliceCnt, SliceUploadStatus: su.SliceUploadStatus, }, nil } + srcobj, err := op.Get(c, s, actualPath) if err != nil { log.Error(err) @@ -241,8 +245,12 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr } user, _ := c.Value(conf.UserKey).(*model.User) - //不存在 + // 生成唯一的TaskID + taskID := uuid.New().String() + + //创建新的上传任务 createsu := &tables.SliceUpload{ + TaskID: taskID, DstPath: req.Path, DstID: srcobj.GetID(), Size: req.Size, @@ -270,7 +278,7 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr Reuse: true, SliceCnt: 0, SliceSize: res.SliceSize, - UploadID: 0, + TaskID: taskID, }, nil } @@ -283,6 +291,7 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr } createsu.SliceCnt = uint((req.Size + createsu.SliceSize - 1) / createsu.SliceSize) createsu.SliceUploadStatus = make([]byte, (createsu.SliceCnt+7)/8) + createsu.Status = tables.SliceUploadStatusWaiting // 设置初始状态 err = db.CreateSliceUpload(createsu) if err != nil { @@ -294,7 +303,7 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr SliceUploadStatus: createsu.SliceUploadStatus, SliceSize: createsu.SliceSize, SliceCnt: createsu.SliceCnt, - UploadID: createsu.ID, + TaskID: createsu.TaskID, }, nil } @@ -305,7 +314,60 @@ type sliceup struct { sync.Mutex } -// 分片上传缓存 +// ensureTmpFile 确保临时文件存在且正确初始化,线程安全 +func (su *sliceup) ensureTmpFile() error { + su.Lock() + defer su.Unlock() + + 
if su.TmpFile == "" { + tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return fmt.Errorf("CreateTemp error: %w", err) + } + + abspath := tf.Name() //这里返回的是绝对路径 + if err = os.Truncate(abspath, int64(su.Size)); err != nil { + tf.Close() // 确保文件被关闭 + os.Remove(abspath) // 清理文件 + return fmt.Errorf("Truncate error: %w", err) + } + + su.TmpFile = abspath + su.tmpFile = tf + return nil + } + + if su.tmpFile == nil { + var err error + su.tmpFile, err = os.OpenFile(su.TmpFile, os.O_RDWR, 0644) + if err != nil { + return fmt.Errorf("OpenFile error: %w", err) + } + } + return nil +} + +// cleanup 清理资源,线程安全 +func (su *sliceup) cleanup() { + su.Lock() + defer su.Unlock() + + if su.tmpFile != nil { + if closeErr := su.tmpFile.Close(); closeErr != nil { + log.Errorf("Failed to close tmp file: %v", closeErr) + } + su.tmpFile = nil + } + + if su.TmpFile != "" { + if removeErr := os.Remove(su.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { + log.Errorf("Failed to remove tmp file %s: %v", su.TmpFile, removeErr) + } + su.TmpFile = "" + } +} + +// 分片上传缓存,使用TaskID作为key var sliceupMap = sync.Map{} type sliceWriter struct { @@ -328,65 +390,88 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS var msu *sliceup var err error - // 使用 LoadOrStore 避免并发竞态条件 - sa, loaded := sliceupMap.LoadOrStore(req.UploadID, nil) + // 使用 LoadOrStore 避免并发竞态条件,使用TaskID作为key + sa, loaded := sliceupMap.LoadOrStore(req.TaskID, nil) if !loaded { // 首次加载,需要从数据库获取 - su, e := db.GetSliceUpload(map[string]any{"id": req.UploadID}) + su, e := db.GetSliceUploadByTaskID(req.TaskID) if e != nil { - log.Errorf("failed get slice upload [%d]: %+v", req.UploadID, e) - sliceupMap.Delete(req.UploadID) // 清理无效的 key + log.Errorf("failed get slice upload [%s]: %+v", req.TaskID, e) + sliceupMap.Delete(req.TaskID) // 清理无效的 key return e } msu = &sliceup{ SliceUpload: su, } - sliceupMap.Store(req.UploadID, msu) + sliceupMap.Store(req.TaskID, msu) } else { msu = 
sa.(*sliceup) + // 如果缓存存在,需要刷新数据库状态以确保数据一致性 + if freshSu, err := db.GetSliceUploadByTaskID(req.TaskID); err == nil { + msu.Lock() + msu.SliceUpload = freshSu + msu.Unlock() + } } + + // 确保并发安全的错误处理 defer func() { if err != nil { + msu.Lock() msu.Status = tables.SliceUploadStatusFailed msu.Message = err.Error() - if updateErr := db.UpdateSliceUpload(msu.SliceUpload); updateErr != nil { + updateData := *msu.SliceUpload // 复制数据避免锁持有时间过长 + msu.Unlock() + + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { log.Errorf("Failed to update slice upload status: %v", updateErr) } } }() + // 使用锁保护状态检查 + msu.Lock() // 检查分片是否已上传过 if tables.IsSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) { + msu.Unlock() log.Warnf("slice already uploaded,req:%+v", req) return nil } + msu.Unlock() if req.SliceHash != "" { + msu.Lock() sliceHash := []string{} // 分片hash //验证分片hash值 if req.SliceNum == 0 { //第一个分片,slicehash是所有的分片hash hs := strings.Split(req.SliceHash, ",") if len(hs) != int(msu.SliceCnt) { + msu.Unlock() msg := fmt.Sprintf("failed verify slice hash cnt req: %+v", req) log.Error(msg) return errors.New(msg) } // 更新分片hash msu.SliceHash = req.SliceHash - if err := db.UpdateSliceUpload(msu.SliceUpload); err != nil { - log.Error("UpdateSliceUpload error", msu.SliceUpload, err) + msu.Status = tables.SliceUploadStatusUploading + updateData := *msu.SliceUpload // 复制数据 + msu.Unlock() + + if err := db.UpdateSliceUpload(&updateData); err != nil { + log.Error("UpdateSliceUpload error", updateData, err) return err } - msu.Status = tables.SliceUploadStatusUploading sliceHash = hs } else { // 如果不是第一个分片,slicehash是当前分片hash sliceHash = strings.Split(msu.SliceHash, ",") - if req.SliceHash != sliceHash[req.SliceNum] { //比对分片hash是否与之前上传的一致 + if len(sliceHash) <= int(req.SliceNum) || req.SliceHash != sliceHash[req.SliceNum] { //比对分片hash是否与之前上传的一致 + msu.Unlock() msg := fmt.Sprintf("failed verify slice hash,req: [%+v]", req) log.Error(msg) return errors.New(msg) } + msu.Unlock() } 
} @@ -398,33 +483,10 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS } default: //其他网盘先缓存到本地 - msu.Lock() - if msu.TmpFile == "" { - tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") - if err != nil { - msu.Unlock() - log.Error("CreateTemp error", req, err) - return err - } - abspath := tf.Name() //这里返回的是绝对路径 - err = os.Truncate(abspath, int64(msu.Size)) - if err != nil { - msu.Unlock() - log.Error("Truncate error", req, err) - return err - } - msu.TmpFile = abspath - msu.tmpFile = tf - } - if msu.tmpFile == nil { - msu.tmpFile, err = os.OpenFile(msu.TmpFile, os.O_RDWR, 0644) - if err != nil { - msu.Unlock() - log.Error("OpenFile error", req, msu.TmpFile, err) - return err - } + if err := msu.ensureTmpFile(); err != nil { + log.Error("ensureTmpFile error", req, err) + return err } - msu.Unlock() // 流式复制,减少内存占用 sw := &sliceWriter{ @@ -432,17 +494,21 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS offset: int64(req.SliceNum) * int64(msu.SliceSize), } _, err := utils.CopyWithBuffer(sw, file) - if err != nil { log.Error("Copy error", req, err) return err } } + + // 原子性更新分片状态 + msu.Lock() tables.SetSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) + updateData := *msu.SliceUpload // 复制数据 + msu.Unlock() - err = db.UpdateSliceUpload(msu.SliceUpload) + err = db.UpdateSliceUpload(&updateData) if err != nil { - log.Error("UpdateSliceUpload error", msu.SliceUpload, err) + log.Error("UpdateSliceUpload error", updateData, err) return err } return nil @@ -450,54 +516,58 @@ func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadS } // SliceUpComplete 完成分片上传 -func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) (*reqres.UploadSliceCompleteResp, error) { +func SliceUpComplete(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { var msu *sliceup var err error - sa, ok := sliceupMap.Load(uploadID) + sa, 
ok := sliceupMap.Load(taskID) if !ok { - su, err := db.GetSliceUpload(map[string]any{"id": uploadID}) + su, err := db.GetSliceUploadByTaskID(taskID) if err != nil { - log.Errorf("failed get slice upload [%d]: %+v", uploadID, err) + log.Errorf("failed get slice upload [%s]: %+v", taskID, err) return nil, err } msu = &sliceup{ SliceUpload: su, } - } else { msu = sa.(*sliceup) } - if !tables.IsAllSliceUploaded(msu.SliceUploadStatus, msu.SliceCnt) { + + // 检查是否所有分片都已上传 + msu.Lock() + allUploaded := tables.IsAllSliceUploaded(msu.SliceUploadStatus, msu.SliceCnt) + msu.Unlock() + + if !allUploaded { return &reqres.UploadSliceCompleteResp{ Complete: 0, SliceUploadStatus: msu.SliceUploadStatus, - UploadID: msu.ID, + TaskID: msu.TaskID, }, nil - } defer func() { + // 确保资源清理和缓存删除 + msu.cleanup() + sliceupMap.Delete(msu.TaskID) + if err != nil { + msu.Lock() msu.Status = tables.SliceUploadStatusFailed msu.Message = err.Error() - if updateErr := db.UpdateSliceUpload(msu.SliceUpload); updateErr != nil { + updateData := *msu.SliceUpload + msu.Unlock() + + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { log.Errorf("Failed to update slice upload status: %v", updateErr) } - } - // 确保资源清理 - if msu.tmpFile != nil { - if closeErr := msu.tmpFile.Close(); closeErr != nil { - log.Errorf("Failed to close tmp file: %v", closeErr) - } - } - if msu.TmpFile != "" { - if removeErr := os.Remove(msu.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { - log.Errorf("Failed to remove tmp file %s: %v", msu.TmpFile, removeErr) + } else { + // 上传成功后从数据库中删除记录,允许重复上传 + if deleteErr := db.DeleteSliceUploadByTaskID(msu.TaskID); deleteErr != nil { + log.Errorf("Failed to delete slice upload record: %v", deleteErr) } } - sliceupMap.Delete(msu.ID) - }() switch s := storage.(type) { case driver.IUploadSliceComplete: @@ -506,26 +576,37 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) log.Error("UploadSliceComplete error", msu.SliceUpload, err) return 
nil, err } + + msu.Lock() msu.Status = tables.SliceUploadStatusComplete - db.UpdateSliceUpload(msu.SliceUpload) - rsp := &reqres.UploadSliceCompleteResp{ - Complete: 1, - UploadID: msu.ID, + updateData := *msu.SliceUpload + msu.Unlock() + + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { + log.Errorf("Failed to update slice upload status to complete: %v", updateErr) } - return rsp, nil + + return &reqres.UploadSliceCompleteResp{ + Complete: 1, + TaskID: msu.TaskID, + }, nil default: - //其他网盘客户端上传到本地后,上传到网盘,使用任务处理 - if msu.tmpFile == nil { - err := fmt.Errorf("tmp file not found [%d]", uploadID) + // 其他网盘客户端上传到本地后,上传到网盘,使用任务处理 + msu.Lock() + tmpFile := msu.tmpFile + msu.Unlock() + + if tmpFile == nil { + err := fmt.Errorf("tmp file not found [%s]", taskID) log.Error(err) return nil, err } + var hashInfo utils.HashInfo if msu.HashMd5 != "" { hashInfo = utils.NewHashInfo(utils.MD5, msu.HashMd5) - } - if msu.HashSha1 != "" { + } else if msu.HashSha1 != "" { hashInfo = utils.NewHashInfo(utils.SHA1, msu.HashSha1) } @@ -538,11 +619,15 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) }, } file.Mimetype = utils.GetMimeType(msu.Name) + if msu.AsTask { - file.SetTmpFile(msu.tmpFile) - // 置空,避免defer中被清理 + file.SetTmpFile(tmpFile) + // 防止defer中清理文件 + msu.Lock() msu.tmpFile = nil msu.TmpFile = "" + msu.Unlock() + _, err = putAsTask(ctx, msu.DstPath, file) if err != nil { log.Error("putAsTask error", msu.SliceUpload, err) @@ -550,10 +635,11 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) } return &reqres.UploadSliceCompleteResp{ Complete: 2, - UploadID: msu.ID, + TaskID: msu.TaskID, }, nil } - file.Reader = msu.tmpFile + + file.Reader = tmpFile err = op.Put(ctx, storage, msu.ActualPath, file, nil) if err != nil { log.Error("Put error", msu.SliceUpload, err) @@ -561,8 +647,7 @@ func SliceUpComplete(ctx context.Context, storage driver.Driver, uploadID uint) } return 
&reqres.UploadSliceCompleteResp{ Complete: 1, - UploadID: msu.ID, + TaskID: msu.TaskID, }, nil } - } diff --git a/internal/model/reqres/upload.go b/internal/model/reqres/upload.go index 51f917669..4e3684b56 100644 --- a/internal/model/reqres/upload.go +++ b/internal/model/reqres/upload.go @@ -14,7 +14,7 @@ type PreupReq struct { // PreupResp 预上传响应 type PreupResp struct { - UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + TaskID string `json:"task_id"` // 任务ID,使用UUID SliceSize int64 `json:"slice_size"` //分片大小,单位:字节 SliceCnt uint `json:"slice_cnt"` // 分片数量 SliceUploadStatus []byte `json:"slice_upload_status"` // 分片上传状态 @@ -23,19 +23,19 @@ type PreupResp struct { // UploadSliceReq 上传分片请求 type UploadSliceReq struct { - UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + TaskID string `json:"task_id"` // 任务ID,使用UUID SliceHash string `json:"slice_hash"` // 分片hash,如果是第一个分片,则需包含所有分片hash,用","分割 SliceNum uint `json:"slice_num"` // 分片序号 } // UploadSliceCompleteReq 分片上传完成请求 type UploadSliceCompleteReq struct { - UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + TaskID string `json:"task_id"` // 任务ID,使用UUID } // UploadSliceCompleteResp 分片上传完成响应 type UploadSliceCompleteResp struct { - UploadID uint `json:"upload_id"` // 上传ID,不是网盘返回的,是本地数据的id + TaskID string `json:"task_id"` // 任务ID,使用UUID SliceUploadStatus []byte `json:"slice_upload_status"` // 分片上传状态 Complete uint `json:"complete"` //完成状态 0 未完成,分片缺失 1 完成 2 成功上传到代理服务 } diff --git a/internal/model/tables/slice_upload.go b/internal/model/tables/slice_upload.go index 72d18b213..f753d6919 100644 --- a/internal/model/tables/slice_upload.go +++ b/internal/model/tables/slice_upload.go @@ -18,6 +18,7 @@ const ( // SliceUpload 分片上传数据表 type SliceUpload struct { Base + TaskID string `json:"task_id" gorm:"uniqueIndex;type:varchar(36)"` // 任务ID,使用UUID PreupID string `json:"preup_id"` // 网盘返回的预上传id SliceSize int64 `json:"slice_size"` // 分片大小,单位:字节 DstID string `json:"dst_id"` // 目标文件夹ID,部分网盘需要 diff --git 
a/server/handles/fsup.go b/server/handles/fsup.go index d444923a2..318d74b06 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -233,15 +233,26 @@ func FsPreup(c *gin.Context) { req := &reqres.PreupReq{} err := c.ShouldBindJSON(req) if err != nil { - common.ErrorResp(c, err, 400) + common.ErrorResp(c, fmt.Errorf("invalid request body: %w", err), 400) + return + } + + // 基本参数验证 + if req.Name == "" { + common.ErrorResp(c, fmt.Errorf("file name is required"), 400) return } + if req.Size <= 0 { + common.ErrorResp(c, fmt.Errorf("file size must be greater than 0"), 400) + return + } + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) path := c.Request.Context().Value(conf.PathKey).(string) res, err := fs.Preup(c.Request.Context(), storage, path, req) if err != nil { - common.ErrorResp(c, err, 500) + common.ErrorResp(c, fmt.Errorf("preup failed: %w", err), 500) return } common.SuccessResp(c, res) @@ -253,34 +264,43 @@ func FsUpSlice(c *gin.Context) { req.SliceHash = c.PostForm("slice_hash") sn, err := strconv.ParseUint(c.PostForm("slice_num"), 10, 32) if err != nil { - common.ErrorResp(c, err, 400) + common.ErrorResp(c, fmt.Errorf("invalid slice_num: %w", err), 400) return } req.SliceNum = uint(sn) - upid, err := strconv.ParseUint(c.PostForm("upload_id"), 10, 64) - if err != nil { - common.ErrorResp(c, err, 400) + req.TaskID = c.PostForm("task_id") + if req.TaskID == "" { + common.ErrorResp(c, fmt.Errorf("task_id is required"), 400) return } - req.UploadID = uint(upid) file, err := c.FormFile("slice") if err != nil { - common.ErrorResp(c, err, 400) + common.ErrorResp(c, fmt.Errorf("failed to get slice file: %w", err), 400) + return + } + + if file.Size == 0 { + common.ErrorResp(c, fmt.Errorf("slice file is empty"), 400) return } + fd, err := file.Open() if err != nil { - common.ErrorResp(c, err, 500) + common.ErrorResp(c, fmt.Errorf("failed to open slice file: %w", err), 500) return } - defer fd.Close() + defer func() { + if closeErr 
:= fd.Close(); closeErr != nil { + log.Errorf("Failed to close slice file: %v", closeErr) + } + }() storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) err = fs.UploadSlice(c.Request.Context(), storage, req, fd) if err != nil { - common.ErrorResp(c, err, 500) + common.ErrorResp(c, fmt.Errorf("upload slice failed: %w", err), 500) return } common.SuccessResp(c) @@ -291,15 +311,20 @@ func FsUpSliceComplete(c *gin.Context) { req := &reqres.UploadSliceCompleteReq{} err := c.ShouldBindJSON(req) if err != nil { - common.ErrorResp(c, err, 400) + common.ErrorResp(c, fmt.Errorf("invalid request body: %w", err), 400) + return + } + + if req.TaskID == "" { + common.ErrorResp(c, fmt.Errorf("task_id is required"), 400) return } + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) - rsp, err := fs.SliceUpComplete(c.Request.Context(), storage, req.UploadID) + rsp, err := fs.SliceUpComplete(c.Request.Context(), storage, req.TaskID) if err != nil { - common.ErrorResp(c, err, 500) + common.ErrorResp(c, fmt.Errorf("slice upload complete failed: %w", err), 500) return } common.SuccessResp(c, rsp) - } From 109b1166383aaeb6ea00c2893ba46728a7acf99f Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 20:33:21 +0800 Subject: [PATCH 08/26] fix(imports): fix build bugs --- drivers/baidu_netdisk/driver.go | 1 + server/handles/fsup.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 474523959..12d581d02 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "encoding/json" "errors" + "fmt" "io" "net/url" "os" diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 318d74b06..d99a22d0e 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -1,6 +1,7 @@ package handles import ( + "fmt" "io" "net/url" stdpath "path" @@ -17,6 +18,7 @@ import ( 
"github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/server/common" "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" ) func getLastModified(c *gin.Context) time.Time { From e03541205a9788552e3d59ba86375aa6fa6b616e Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 21:30:44 +0800 Subject: [PATCH 09/26] feat(upload): Refactored the shard upload manager to optimize upload session and state management --- internal/fs/fs.go | 456 +------------------------------------- internal/fs/sliceup.go | 488 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 494 insertions(+), 450 deletions(-) create mode 100644 internal/fs/sliceup.go diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 871829fe1..011d9866b 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -2,30 +2,18 @@ package fs import ( "context" - "fmt" "io" "mime/multipart" - "os" - "strings" - "sync" - "time" log "github.com/sirupsen/logrus" - "gorm.io/gorm" - "github.com/OpenListTeam/OpenList/v4/internal/conf" - "github.com/OpenListTeam/OpenList/v4/internal/db" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" - "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "github.com/OpenListTeam/OpenList/v4/internal/op" - "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/internal/task" - "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/pkg/errors" - "github.com/google/uuid" ) // the param named path of functions in this package is a mount path @@ -205,449 +193,17 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error { /// 分片上传功能-------------------------------------------------------------------- -// Preup 预上传 +// Preup 预上传 - 使用新的管理器重构 func Preup(c context.Context, s driver.Driver, actualPath string, req 
*reqres.PreupReq) (*reqres.PreupResp, error) { - // 检查是否存在未完成的上传任务(用于断点续传) - wh := map[string]any{} - wh["dst_path"] = req.Path - wh["name"] = req.Name - wh["size"] = req.Size - wh["status"] = tables.SliceUploadStatusUploading // 只查找正在进行中的任务 - if req.Hash.Md5 != "" { - wh["hash_md5"] = req.Hash.Md5 - } - if req.Hash.Sha1 != "" { - wh["hash_sha1"] = req.Hash.Sha1 - } - if req.Hash.Md5256KB != "" { - wh["hash_md5_256kb"] = req.Hash.Md5256KB - } - - su, err := db.GetSliceUpload(wh) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error("GetSliceUpload", err) - return nil, errors.WithStack(err) - } - - if su.ID != 0 { // 找到未完成的上传任务,支持断点续传 - return &reqres.PreupResp{ - TaskID: su.TaskID, - SliceSize: su.SliceSize, - SliceCnt: su.SliceCnt, - SliceUploadStatus: su.SliceUploadStatus, - }, nil - } - - srcobj, err := op.Get(c, s, actualPath) - if err != nil { - log.Error(err) - return nil, errors.WithStack(err) - } - user, _ := c.Value(conf.UserKey).(*model.User) - - // 生成唯一的TaskID - taskID := uuid.New().String() - - //创建新的上传任务 - createsu := &tables.SliceUpload{ - TaskID: taskID, - DstPath: req.Path, - DstID: srcobj.GetID(), - Size: req.Size, - Name: req.Name, - HashMd5: req.Hash.Md5, - HashMd5256KB: req.Hash.Md5256KB, - HashSha1: req.Hash.Sha1, - Overwrite: req.Overwrite, - ActualPath: actualPath, - UserID: user.ID, - AsTask: req.AsTask, - } - log.Infof("storage mount path %s", s.GetStorage().MountPath) - switch st := s.(type) { - case driver.IPreup: - log.Info("preup support") - res, err := st.Preup(c, srcobj, req) - if err != nil { - log.Error("Preup error", req, err) - return nil, errors.WithStack(err) - } - log.Info("Preup success", res) - if res.Reuse { //秒传 - return &reqres.PreupResp{ - Reuse: true, - SliceCnt: 0, - SliceSize: res.SliceSize, - TaskID: taskID, - }, nil - - } - createsu.PreupID = res.PreupID - createsu.SliceSize = res.SliceSize - createsu.Server = res.Server - default: - log.Info("Preup not support") - createsu.SliceSize = 10 * 
utils.MB - } - createsu.SliceCnt = uint((req.Size + createsu.SliceSize - 1) / createsu.SliceSize) - createsu.SliceUploadStatus = make([]byte, (createsu.SliceCnt+7)/8) - createsu.Status = tables.SliceUploadStatusWaiting // 设置初始状态 - - err = db.CreateSliceUpload(createsu) - if err != nil { - log.Error("CreateSliceUpload error", createsu, err) - return nil, errors.WithStack(err) - } - return &reqres.PreupResp{ - Reuse: false, - SliceUploadStatus: createsu.SliceUploadStatus, - SliceSize: createsu.SliceSize, - SliceCnt: createsu.SliceCnt, - TaskID: createsu.TaskID, - }, nil - -} - -type sliceup struct { - *tables.SliceUpload - tmpFile *os.File - sync.Mutex -} - -// ensureTmpFile 确保临时文件存在且正确初始化,线程安全 -func (su *sliceup) ensureTmpFile() error { - su.Lock() - defer su.Unlock() - - if su.TmpFile == "" { - tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") - if err != nil { - return fmt.Errorf("CreateTemp error: %w", err) - } - - abspath := tf.Name() //这里返回的是绝对路径 - if err = os.Truncate(abspath, int64(su.Size)); err != nil { - tf.Close() // 确保文件被关闭 - os.Remove(abspath) // 清理文件 - return fmt.Errorf("Truncate error: %w", err) - } - - su.TmpFile = abspath - su.tmpFile = tf - return nil - } - - if su.tmpFile == nil { - var err error - su.tmpFile, err = os.OpenFile(su.TmpFile, os.O_RDWR, 0644) - if err != nil { - return fmt.Errorf("OpenFile error: %w", err) - } - } - return nil -} - -// cleanup 清理资源,线程安全 -func (su *sliceup) cleanup() { - su.Lock() - defer su.Unlock() - - if su.tmpFile != nil { - if closeErr := su.tmpFile.Close(); closeErr != nil { - log.Errorf("Failed to close tmp file: %v", closeErr) - } - su.tmpFile = nil - } - - if su.TmpFile != "" { - if removeErr := os.Remove(su.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { - log.Errorf("Failed to remove tmp file %s: %v", su.TmpFile, removeErr) - } - su.TmpFile = "" - } -} - -// 分片上传缓存,使用TaskID作为key -var sliceupMap = sync.Map{} - -type sliceWriter struct { - file *os.File - offset int64 + return 
globalSliceManager.CreateSession(c, s, actualPath, req) } -// Write implements io.Writer interface -// 虽然每个分片都定义了一个sliceWriter -// 但是Write方法会在同一个分片复制过程中多次调用, -// 所以要更新自身的offset -func (sw *sliceWriter) Write(p []byte) (int, error) { - n, err := sw.file.WriteAt(p, sw.offset) - sw.offset += int64(n) - return n, err -} - -// UploadSlice 上传切片,第一个分片必须先上传 +// UploadSlice 上传切片 - 使用新的管理器重构 func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { - var msu *sliceup - var err error - - // 使用 LoadOrStore 避免并发竞态条件,使用TaskID作为key - sa, loaded := sliceupMap.LoadOrStore(req.TaskID, nil) - if !loaded { - // 首次加载,需要从数据库获取 - su, e := db.GetSliceUploadByTaskID(req.TaskID) - if e != nil { - log.Errorf("failed get slice upload [%s]: %+v", req.TaskID, e) - sliceupMap.Delete(req.TaskID) // 清理无效的 key - return e - } - msu = &sliceup{ - SliceUpload: su, - } - sliceupMap.Store(req.TaskID, msu) - } else { - msu = sa.(*sliceup) - // 如果缓存存在,需要刷新数据库状态以确保数据一致性 - if freshSu, err := db.GetSliceUploadByTaskID(req.TaskID); err == nil { - msu.Lock() - msu.SliceUpload = freshSu - msu.Unlock() - } - } - - // 确保并发安全的错误处理 - defer func() { - if err != nil { - msu.Lock() - msu.Status = tables.SliceUploadStatusFailed - msu.Message = err.Error() - updateData := *msu.SliceUpload // 复制数据避免锁持有时间过长 - msu.Unlock() - - if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { - log.Errorf("Failed to update slice upload status: %v", updateErr) - } - } - }() - - // 使用锁保护状态检查 - msu.Lock() - // 检查分片是否已上传过 - if tables.IsSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) { - msu.Unlock() - log.Warnf("slice already uploaded,req:%+v", req) - return nil - } - msu.Unlock() - - if req.SliceHash != "" { - msu.Lock() - sliceHash := []string{} // 分片hash - - //验证分片hash值 - if req.SliceNum == 0 { //第一个分片,slicehash是所有的分片hash - hs := strings.Split(req.SliceHash, ",") - if len(hs) != int(msu.SliceCnt) { - msu.Unlock() - msg := fmt.Sprintf("failed verify slice 
hash cnt req: %+v", req) - log.Error(msg) - return errors.New(msg) - } - // 更新分片hash - msu.SliceHash = req.SliceHash - msu.Status = tables.SliceUploadStatusUploading - updateData := *msu.SliceUpload // 复制数据 - msu.Unlock() - - if err := db.UpdateSliceUpload(&updateData); err != nil { - log.Error("UpdateSliceUpload error", updateData, err) - return err - } - sliceHash = hs - } else { // 如果不是第一个分片,slicehash是当前分片hash - sliceHash = strings.Split(msu.SliceHash, ",") - if len(sliceHash) <= int(req.SliceNum) || req.SliceHash != sliceHash[req.SliceNum] { //比对分片hash是否与之前上传的一致 - msu.Unlock() - msg := fmt.Sprintf("failed verify slice hash,req: [%+v]", req) - log.Error(msg) - return errors.New(msg) - } - msu.Unlock() - } - } - - switch s := storage.(type) { - case driver.ISliceUpload: - if err := s.SliceUpload(ctx, msu.SliceUpload, req.SliceNum, file); err != nil { - log.Error("SliceUpload error", req, err) - return err - } - - default: //其他网盘先缓存到本地 - if err := msu.ensureTmpFile(); err != nil { - log.Error("ensureTmpFile error", req, err) - return err - } - - // 流式复制,减少内存占用 - sw := &sliceWriter{ - file: msu.tmpFile, - offset: int64(req.SliceNum) * int64(msu.SliceSize), - } - _, err := utils.CopyWithBuffer(sw, file) - if err != nil { - log.Error("Copy error", req, err) - return err - } - } - - // 原子性更新分片状态 - msu.Lock() - tables.SetSliceUploaded(msu.SliceUploadStatus, int(req.SliceNum)) - updateData := *msu.SliceUpload // 复制数据 - msu.Unlock() - - err = db.UpdateSliceUpload(&updateData) - if err != nil { - log.Error("UpdateSliceUpload error", updateData, err) - return err - } - return nil - + return globalSliceManager.UploadSlice(ctx, storage, req, file) } -// SliceUpComplete 完成分片上传 +// SliceUpComplete 完成分片上传 - 使用新的管理器重构 func SliceUpComplete(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { - var msu *sliceup - var err error - - sa, ok := sliceupMap.Load(taskID) - if !ok { - su, err := db.GetSliceUploadByTaskID(taskID) - if err != 
nil { - log.Errorf("failed get slice upload [%s]: %+v", taskID, err) - return nil, err - } - msu = &sliceup{ - SliceUpload: su, - } - } else { - msu = sa.(*sliceup) - } - - // 检查是否所有分片都已上传 - msu.Lock() - allUploaded := tables.IsAllSliceUploaded(msu.SliceUploadStatus, msu.SliceCnt) - msu.Unlock() - - if !allUploaded { - return &reqres.UploadSliceCompleteResp{ - Complete: 0, - SliceUploadStatus: msu.SliceUploadStatus, - TaskID: msu.TaskID, - }, nil - } - - defer func() { - // 确保资源清理和缓存删除 - msu.cleanup() - sliceupMap.Delete(msu.TaskID) - - if err != nil { - msu.Lock() - msu.Status = tables.SliceUploadStatusFailed - msu.Message = err.Error() - updateData := *msu.SliceUpload - msu.Unlock() - - if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { - log.Errorf("Failed to update slice upload status: %v", updateErr) - } - } else { - // 上传成功后从数据库中删除记录,允许重复上传 - if deleteErr := db.DeleteSliceUploadByTaskID(msu.TaskID); deleteErr != nil { - log.Errorf("Failed to delete slice upload record: %v", deleteErr) - } - } - }() - switch s := storage.(type) { - case driver.IUploadSliceComplete: - err = s.UploadSliceComplete(ctx, msu.SliceUpload) - if err != nil { - log.Error("UploadSliceComplete error", msu.SliceUpload, err) - return nil, err - } - - msu.Lock() - msu.Status = tables.SliceUploadStatusComplete - updateData := *msu.SliceUpload - msu.Unlock() - - if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { - log.Errorf("Failed to update slice upload status to complete: %v", updateErr) - } - - return &reqres.UploadSliceCompleteResp{ - Complete: 1, - TaskID: msu.TaskID, - }, nil - - default: - // 其他网盘客户端上传到本地后,上传到网盘,使用任务处理 - msu.Lock() - tmpFile := msu.tmpFile - msu.Unlock() - - if tmpFile == nil { - err := fmt.Errorf("tmp file not found [%s]", taskID) - log.Error(err) - return nil, err - } - - var hashInfo utils.HashInfo - if msu.HashMd5 != "" { - hashInfo = utils.NewHashInfo(utils.MD5, msu.HashMd5) - } else if msu.HashSha1 != "" { - hashInfo = 
utils.NewHashInfo(utils.SHA1, msu.HashSha1) - } - - file := &stream.FileStream{ - Obj: &model.Object{ - Name: msu.Name, - Size: msu.Size, - Modified: time.Now(), - HashInfo: hashInfo, - }, - } - file.Mimetype = utils.GetMimeType(msu.Name) - - if msu.AsTask { - file.SetTmpFile(tmpFile) - // 防止defer中清理文件 - msu.Lock() - msu.tmpFile = nil - msu.TmpFile = "" - msu.Unlock() - - _, err = putAsTask(ctx, msu.DstPath, file) - if err != nil { - log.Error("putAsTask error", msu.SliceUpload, err) - return nil, err - } - return &reqres.UploadSliceCompleteResp{ - Complete: 2, - TaskID: msu.TaskID, - }, nil - } - - file.Reader = tmpFile - err = op.Put(ctx, storage, msu.ActualPath, file, nil) - if err != nil { - log.Error("Put error", msu.SliceUpload, err) - return nil, err - } - return &reqres.UploadSliceCompleteResp{ - Complete: 1, - TaskID: msu.TaskID, - }, nil - } + return globalSliceManager.CompleteUpload(ctx, storage, taskID) } diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go new file mode 100644 index 000000000..c0310b903 --- /dev/null +++ b/internal/fs/sliceup.go @@ -0,0 +1,488 @@ +package fs + +import ( + "context" + "fmt" + "mime/multipart" + "os" + "strings" + "sync" + "time" + + "github.com/OpenListTeam/OpenList/v4/internal/conf" + "github.com/OpenListTeam/OpenList/v4/internal/db" + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/model/reqres" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" + "github.com/OpenListTeam/OpenList/v4/internal/op" + "github.com/OpenListTeam/OpenList/v4/internal/stream" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/google/uuid" + "github.com/pkg/errors" + "gorm.io/gorm" + log "github.com/sirupsen/logrus" +) + +// SliceUploadManager 分片上传管理器 +type SliceUploadManager struct { + cache sync.Map // TaskID -> *SliceUploadSession + tempDir string // 临时文件目录 +} + +// SliceUploadSession 分片上传会话 +type 
SliceUploadSession struct { + *tables.SliceUpload + tmpFile *os.File + mutex sync.Mutex // 使用Mutex而不是RWMutex,保持与原始实现一致 +} + +// NewSliceUploadManager 创建分片上传管理器 +func NewSliceUploadManager() *SliceUploadManager { + return &SliceUploadManager{ + tempDir: conf.Conf.TempDir, + } +} + +// CreateSession 创建新的上传会话 - 完整实现Preup逻辑 +func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { + // 检查是否存在未完成的上传任务(用于断点续传) + wh := map[string]any{ + "dst_path": req.Path, + "name": req.Name, + "size": req.Size, + "status": tables.SliceUploadStatusUploading, // 只查找正在进行中的任务 + } + if req.Hash.Md5 != "" { + wh["hash_md5"] = req.Hash.Md5 + } + if req.Hash.Sha1 != "" { + wh["hash_sha1"] = req.Hash.Sha1 + } + if req.Hash.Md5256KB != "" { + wh["hash_md5_256kb"] = req.Hash.Md5256KB + } + + su, err := db.GetSliceUpload(wh) + if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { + log.Error("GetSliceUpload", err) + return nil, errors.WithStack(err) + } + + if su.ID != 0 { // 找到未完成的上传任务,支持断点续传 + session := &SliceUploadSession{SliceUpload: su} + m.cache.Store(su.TaskID, session) + return &reqres.PreupResp{ + TaskID: su.TaskID, + SliceSize: su.SliceSize, + SliceCnt: su.SliceCnt, + SliceUploadStatus: su.SliceUploadStatus, + }, nil + } + + srcobj, err := op.Get(ctx, storage, actualPath) + if err != nil { + log.Error(err) + return nil, errors.WithStack(err) + } + user, _ := ctx.Value(conf.UserKey).(*model.User) + + // 生成唯一的TaskID + taskID := uuid.New().String() + + //创建新的上传任务 + createsu := &tables.SliceUpload{ + TaskID: taskID, + DstPath: req.Path, + DstID: srcobj.GetID(), + Size: req.Size, + Name: req.Name, + HashMd5: req.Hash.Md5, + HashMd5256KB: req.Hash.Md5256KB, + HashSha1: req.Hash.Sha1, + Overwrite: req.Overwrite, + ActualPath: actualPath, + AsTask: req.AsTask, + } + if user != nil { + createsu.UserID = user.ID + } + log.Infof("storage mount path %s", storage.GetStorage().MountPath) + + 
switch st := storage.(type) { + case driver.IPreup: + log.Info("preup support") + res, err := st.Preup(ctx, srcobj, req) + if err != nil { + log.Error("Preup error", req, err) + return nil, errors.WithStack(err) + } + log.Info("Preup success", res) + if res.Reuse { //秒传 + return &reqres.PreupResp{ + Reuse: true, + SliceCnt: 0, + SliceSize: res.SliceSize, + TaskID: taskID, + }, nil + } + createsu.PreupID = res.PreupID + createsu.SliceSize = res.SliceSize + createsu.Server = res.Server + default: + log.Info("Preup not support") + createsu.SliceSize = 10 * utils.MB + } + + createsu.SliceCnt = uint((req.Size + createsu.SliceSize - 1) / createsu.SliceSize) + createsu.SliceUploadStatus = make([]byte, (createsu.SliceCnt+7)/8) + createsu.Status = tables.SliceUploadStatusWaiting // 设置初始状态 + + err = db.CreateSliceUpload(createsu) + if err != nil { + log.Error("CreateSliceUpload error", createsu, err) + return nil, errors.WithStack(err) + } + + session := &SliceUploadSession{SliceUpload: createsu} + m.cache.Store(taskID, session) + + return &reqres.PreupResp{ + Reuse: false, + SliceUploadStatus: createsu.SliceUploadStatus, + SliceSize: createsu.SliceSize, + SliceCnt: createsu.SliceCnt, + TaskID: createsu.TaskID, + }, nil +} + +// getOrLoadSession 获取或加载会话,提高代码复用性 +func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSession, error) { + sa, loaded := m.cache.LoadOrStore(taskID, (*SliceUploadSession)(nil)) + if !loaded { + // 首次加载,需要从数据库获取 + su, err := db.GetSliceUploadByTaskID(taskID) + if err != nil { + m.cache.Delete(taskID) // 清理无效的 key + return nil, errors.WithMessagef(err, "failed get slice upload [%s]", taskID) + } + session := &SliceUploadSession{ + SliceUpload: su, + } + m.cache.Store(taskID, session) + return session, nil + } + + // 缓存中存在,但可能是nil值,需要检查 + if sa == nil { + // 说明之前存储了nil,需要重新从数据库加载 + su, err := db.GetSliceUploadByTaskID(taskID) + if err != nil { + m.cache.Delete(taskID) + return nil, errors.WithMessagef(err, "failed get slice upload 
[%s]", taskID) + } + session := &SliceUploadSession{ + SliceUpload: su, + } + m.cache.Store(taskID, session) + return session, nil + } + + session := sa.(*SliceUploadSession) + // 刷新数据库状态以确保数据一致性 + if freshSu, err := db.GetSliceUploadByTaskID(taskID); err == nil { + session.mutex.Lock() + session.SliceUpload = freshSu + session.mutex.Unlock() + } + return session, nil +} + +// UploadSlice 上传分片 - 完整实现原始逻辑 +func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { + var err error + + session, err := m.getOrLoadSession(req.TaskID) + if err != nil { + log.Errorf("failed to get session: %+v", err) + return err + } + + // 确保并发安全的错误处理 + defer func() { + if err != nil { + session.mutex.Lock() + session.Status = tables.SliceUploadStatusFailed + session.Message = err.Error() + updateData := *session.SliceUpload // 复制数据避免锁持有时间过长 + session.mutex.Unlock() + + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { + log.Errorf("Failed to update slice upload status: %v", updateErr) + } + } + }() + + // 使用锁保护状态检查 + session.mutex.Lock() + // 检查分片是否已上传过 + if tables.IsSliceUploaded(session.SliceUploadStatus, int(req.SliceNum)) { + session.mutex.Unlock() + log.Warnf("slice already uploaded,req:%+v", req) + return nil + } + session.mutex.Unlock() + + // 分片hash验证逻辑 + if req.SliceHash != "" { + session.mutex.Lock() + + //验证分片hash值 + if req.SliceNum == 0 { //第一个分片,slicehash是所有的分片hash + hs := strings.Split(req.SliceHash, ",") + if len(hs) != int(session.SliceCnt) { + session.mutex.Unlock() + err := fmt.Errorf("slice hash count mismatch, expected %d, got %d", session.SliceCnt, len(hs)) + log.Error("slice hash count mismatch", req, err) + return err + } + session.SliceHash = req.SliceHash // 存储完整的hash字符串 + } else { + session.SliceHash = req.SliceHash // 存储单个分片hash + } + session.mutex.Unlock() + } + + // 根据存储类型处理分片上传 + switch s := storage.(type) { + case driver.ISliceUpload: + 
log.Info("SliceUpload support") + if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, file); err != nil { + log.Error("SliceUpload error", req, err) + return err + } + + default: //其他网盘先缓存到本地 + if err := session.ensureTmpFile(); err != nil { + log.Error("ensureTmpFile error", req, err) + return err + } + + // 流式复制,减少内存占用 + sw := &sliceWriter{ + file: session.tmpFile, + offset: int64(req.SliceNum) * int64(session.SliceSize), + } + _, err := utils.CopyWithBuffer(sw, file) + if err != nil { + log.Error("Copy error", req, err) + return err + } + } + + // 原子性更新分片状态 + session.mutex.Lock() + tables.SetSliceUploaded(session.SliceUploadStatus, int(req.SliceNum)) + updateData := *session.SliceUpload // 复制数据 + session.mutex.Unlock() + + err = db.UpdateSliceUpload(&updateData) + if err != nil { + log.Error("UpdateSliceUpload error", updateData, err) + return err + } + return nil +} + +// CompleteUpload 完成上传 - 完整实现原始逻辑 +func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { + var err error + + session, err := m.getOrLoadSession(taskID) + if err != nil { + log.Errorf("failed to get session: %+v", err) + return nil, err + } + + // 检查是否所有分片都已上传 + session.mutex.Lock() + allUploaded := tables.IsAllSliceUploaded(session.SliceUploadStatus, session.SliceCnt) + session.mutex.Unlock() + + if !allUploaded { + return &reqres.UploadSliceCompleteResp{ + Complete: 0, + SliceUploadStatus: session.SliceUploadStatus, + TaskID: session.TaskID, + }, nil + } + + defer func() { + // 确保资源清理和缓存删除 + session.cleanup() + m.cache.Delete(session.TaskID) + + if err != nil { + session.mutex.Lock() + session.Status = tables.SliceUploadStatusFailed + session.Message = err.Error() + updateData := *session.SliceUpload + session.mutex.Unlock() + + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { + log.Errorf("Failed to update slice upload status: %v", updateErr) + } + } else { + // 
上传成功后从数据库中删除记录,允许重复上传 + if deleteErr := db.DeleteSliceUploadByTaskID(session.TaskID); deleteErr != nil { + log.Errorf("Failed to delete slice upload record: %v", deleteErr) + } + } + }() + + switch s := storage.(type) { + case driver.IUploadSliceComplete: + err = s.UploadSliceComplete(ctx, session.SliceUpload) + if err != nil { + log.Error("UploadSliceComplete error", session.SliceUpload, err) + return nil, err + } + + // 原生分片上传成功,直接返回,defer中会删除数据库记录 + return &reqres.UploadSliceCompleteResp{ + Complete: 1, + TaskID: session.TaskID, + }, nil + + default: + // 其他网盘客户端上传到本地后,上传到网盘,使用任务处理 + session.mutex.Lock() + tmpFile := session.tmpFile + session.mutex.Unlock() + + if tmpFile == nil { + err := fmt.Errorf("tmp file not found [%s]", taskID) + log.Error(err) + return nil, err + } + + var hashInfo utils.HashInfo + if session.HashMd5 != "" { + hashInfo = utils.NewHashInfo(utils.MD5, session.HashMd5) + } else if session.HashSha1 != "" { + hashInfo = utils.NewHashInfo(utils.SHA1, session.HashSha1) + } + + file := &stream.FileStream{ + Obj: &model.Object{ + Name: session.Name, + Size: session.Size, + Modified: time.Now(), + HashInfo: hashInfo, + }, + } + file.Mimetype = utils.GetMimeType(session.Name) + + if session.AsTask { + file.SetTmpFile(tmpFile) + // 防止defer中清理文件 + session.mutex.Lock() + session.tmpFile = nil + session.TmpFile = "" + session.mutex.Unlock() + + _, err = putAsTask(ctx, session.DstPath, file) + if err != nil { + log.Error("putAsTask error", session.SliceUpload, err) + return nil, err + } + return &reqres.UploadSliceCompleteResp{ + Complete: 2, + TaskID: session.TaskID, + }, nil + } + + file.Reader = tmpFile + err = op.Put(ctx, storage, session.ActualPath, file, nil) + if err != nil { + log.Error("Put error", session.SliceUpload, err) + return nil, err + } + return &reqres.UploadSliceCompleteResp{ + Complete: 1, + TaskID: session.TaskID, + }, nil + } +} + +// ensureTmpFile 确保临时文件存在且正确初始化,线程安全 - 保持原始实现 +func (s *SliceUploadSession) ensureTmpFile() error { 
+ s.mutex.Lock() + defer s.mutex.Unlock() + + if s.TmpFile == "" { + tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") + if err != nil { + return fmt.Errorf("CreateTemp error: %w", err) + } + + abspath := tf.Name() //这里返回的是绝对路径 + if err = os.Truncate(abspath, int64(s.Size)); err != nil { + tf.Close() // 确保文件被关闭 + os.Remove(abspath) // 清理文件 + return fmt.Errorf("Truncate error: %w", err) + } + + s.TmpFile = abspath + s.tmpFile = tf + return nil + } + + if s.tmpFile == nil { + var err error + s.tmpFile, err = os.OpenFile(s.TmpFile, os.O_RDWR, 0644) + if err != nil { + return fmt.Errorf("OpenFile error: %w", err) + } + } + return nil +} + +// cleanup 清理资源,线程安全 - 保持原始实现 +func (s *SliceUploadSession) cleanup() { + s.mutex.Lock() + defer s.mutex.Unlock() + + if s.tmpFile != nil { + if closeErr := s.tmpFile.Close(); closeErr != nil { + log.Errorf("Failed to close tmp file: %v", closeErr) + } + s.tmpFile = nil + } + + if s.TmpFile != "" { + if removeErr := os.Remove(s.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { + log.Errorf("Failed to remove tmp file %s: %v", s.TmpFile, removeErr) + } + s.TmpFile = "" + } +} + +// 全局管理器实例 +var globalSliceManager = NewSliceUploadManager() + +// sliceWriter 分片写入器 - 保持原始实现 +type sliceWriter struct { + file *os.File + offset int64 +} + +// Write implements io.Writer interface +// 虽然每个分片都定义了一个sliceWriter +// 但是Write方法会在同一个分片复制过程中多次调用, +// 所以要更新自身的offset +func (sw *sliceWriter) Write(p []byte) (int, error) { + n, err := sw.file.WriteAt(p, sw.offset) + sw.offset += int64(n) + return n, err +} From 70fbe17579f3e9d4dff989e9358f3f27292e902f Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 21:46:17 +0800 Subject: [PATCH 10/26] refactor(upload): Refactor the global shard upload manager using delayed initialization and optimize temporary directory configuration --- internal/fs/fs.go | 6 +++--- internal/fs/sliceup.go | 25 +++++++++++++++++++++---- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git 
a/internal/fs/fs.go b/internal/fs/fs.go index 011d9866b..99e83bc8a 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -195,15 +195,15 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error { // Preup 预上传 - 使用新的管理器重构 func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { - return globalSliceManager.CreateSession(c, s, actualPath, req) + return getGlobalSliceManager().CreateSession(c, s, actualPath, req) } // UploadSlice 上传切片 - 使用新的管理器重构 func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { - return globalSliceManager.UploadSlice(ctx, storage, req, file) + return getGlobalSliceManager().UploadSlice(ctx, storage, req, file) } // SliceUpComplete 完成分片上传 - 使用新的管理器重构 func SliceUpComplete(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { - return globalSliceManager.CompleteUpload(ctx, storage, taskID) + return getGlobalSliceManager().CompleteUpload(ctx, storage, taskID) } diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index c0310b903..d238679f0 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -39,8 +39,12 @@ type SliceUploadSession struct { // NewSliceUploadManager 创建分片上传管理器 func NewSliceUploadManager() *SliceUploadManager { + tempDir := os.TempDir() // 默认使用系统临时目录 + if conf.Conf != nil && conf.Conf.TempDir != "" { + tempDir = conf.Conf.TempDir + } return &SliceUploadManager{ - tempDir: conf.Conf.TempDir, + tempDir: tempDir, } } @@ -421,7 +425,11 @@ func (s *SliceUploadSession) ensureTmpFile() error { defer s.mutex.Unlock() if s.TmpFile == "" { - tf, err := os.CreateTemp(conf.Conf.TempDir, "file-*") + tempDir := os.TempDir() // 默认使用系统临时目录 + if conf.Conf != nil && conf.Conf.TempDir != "" { + tempDir = conf.Conf.TempDir + } + tf, err := os.CreateTemp(tempDir, "file-*") if err != nil { return fmt.Errorf("CreateTemp error: %w", err) } @@ -468,8 
+476,17 @@ func (s *SliceUploadSession) cleanup() { } } -// 全局管理器实例 -var globalSliceManager = NewSliceUploadManager() +// 全局管理器实例使用延迟初始化 +var globalSliceManager *SliceUploadManager +var globalSliceManagerOnce sync.Once + +// getGlobalSliceManager 获取全局分片上传管理器(延迟初始化) +func getGlobalSliceManager() *SliceUploadManager { + globalSliceManagerOnce.Do(func() { + globalSliceManager = NewSliceUploadManager() + }) + return globalSliceManager +} // sliceWriter 分片写入器 - 保持原始实现 type sliceWriter struct { From a58e939d4c5f9733877194aabd341673ec3b6858 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Fri, 5 Sep 2025 23:54:06 +0800 Subject: [PATCH 11/26] feat(upload): Added the function of cleaning up orphan shard upload tasks and optimized temporary file management --- internal/db/db.go | 5 + internal/db/slice_upload.go | 135 ++++++++++++++++++++++++++ internal/fs/sliceup.go | 86 +++++++++++----- internal/model/tables/slice_upload.go | 11 +++ pkg/tempdir/tempdir.go | 35 +++++++ 5 files changed, 246 insertions(+), 26 deletions(-) create mode 100644 pkg/tempdir/tempdir.go diff --git a/internal/db/db.go b/internal/db/db.go index 8fc5149f5..faa61d002 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -17,6 +17,11 @@ func Init(d *gorm.DB) { if err != nil { log.Fatalf("failed migrate database: %s", err.Error()) } + + // 清理启动前遗留的孤儿分片上传任务 + if err := CleanupOrphanedSliceUploads(); err != nil { + log.Errorf("Failed to cleanup orphaned slice uploads: %v", err) + } } func AutoMigrate(dst ...interface{}) error { diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go index f57895ae1..2d9a0c9d7 100644 --- a/internal/db/slice_upload.go +++ b/internal/db/slice_upload.go @@ -1,9 +1,17 @@ package db import ( + "io/fs" + "os" + "path/filepath" + "strings" + "time" + "github.com/OpenListTeam/OpenList/v4/internal/model/tables" + "github.com/OpenListTeam/OpenList/v4/pkg/tempdir" "github.com/pkg/errors" "gorm.io/gorm" + log "github.com/sirupsen/logrus" ) func CreateSliceUpload(su 
*tables.SliceUpload) error { @@ -58,3 +66,130 @@ func UpdateSliceStatusAtomic(taskID string, sliceNum int, status []byte) error { return tx.Save(&su).Error })) } + +// CleanupOrphanedSliceUploads 清理孤儿分片上传记录(启动时调用) +func CleanupOrphanedSliceUploads() error { + // 清理超过24小时的未完成任务 + cutoff := time.Now().Add(-24 * time.Hour) + + var orphanedTasks []tables.SliceUpload + if err := db.Where("status IN (?, ?) AND updated_at < ?", + tables.SliceUploadStatusWaiting, + tables.SliceUploadStatusUploading, + cutoff).Find(&orphanedTasks).Error; err != nil { + return errors.WithStack(err) + } + + cleanedCount := 0 + for _, task := range orphanedTasks { + // 清理临时文件 + if task.TmpFile != "" { + if err := os.Remove(task.TmpFile); err != nil && !os.IsNotExist(err) { + log.Warnf("Failed to remove orphaned tmp file %s: %v", task.TmpFile, err) + } else if err == nil { + log.Debugf("Removed orphaned tmp file: %s", task.TmpFile) + } + } + + // 删除数据库记录 + if err := db.Delete(&task).Error; err != nil { + log.Errorf("Failed to delete orphaned slice upload task %s: %v", task.TaskID, err) + } else { + cleanedCount++ + } + } + + if cleanedCount > 0 { + log.Infof("Cleaned up %d orphaned slice upload tasks", cleanedCount) + } + + // 额外清理:扫描临时目录中的孤儿文件 + return cleanupOrphanedTempFiles() +} + +// cleanupOrphanedTempFiles 清理临时目录中的孤儿文件 +func cleanupOrphanedTempFiles() error { + // 获取临时目录路径,使用共享的tempdir包 + tempDir := tempdir.GetPersistentTempDir() + + // 检查临时目录是否存在 + if _, err := os.Stat(tempDir); os.IsNotExist(err) { + log.Debugf("Temp directory does not exist: %s", tempDir) + return nil + } + + // 获取所有活跃的分片上传任务的临时文件列表 + var activeTasks []tables.SliceUpload + if err := db.Where("tmp_file IS NOT NULL AND tmp_file != '' AND status IN (?, ?)", + tables.SliceUploadStatusWaiting, + tables.SliceUploadStatusUploading).Find(&activeTasks).Error; err != nil { + return errors.WithStack(err) + } + + // 构建活跃文件的映射表 + activeFiles := make(map[string]bool) + for _, task := range activeTasks { + if task.TmpFile != "" { + 
activeFiles[task.TmpFile] = true + } + } + + cleanedCount := 0 + cutoff := time.Now().Add(-24 * time.Hour) // 只清理超过24小时的文件 + + // 遍历临时目录 + err := filepath.WalkDir(tempDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + log.Warnf("Failed to access path %s: %v", path, err) + return nil // 继续处理其他文件 + } + + // 跳过目录 + if d.IsDir() { + return nil + } + + // 只处理分片上传临时文件(以slice_upload_开头) + if !strings.HasPrefix(d.Name(), "slice_upload_") { + return nil + } + + // 检查文件是否在活跃任务列表中 + if activeFiles[path] { + return nil // 文件仍在使用中,跳过 + } + + // 检查文件修改时间 + info, err := d.Info() + if err != nil { + log.Warnf("Failed to get file info for %s: %v", path, err) + return nil + } + + // 只清理超过24小时的文件 + if info.ModTime().After(cutoff) { + return nil + } + + // 删除孤儿文件 + if err := os.Remove(path); err != nil { + log.Warnf("Failed to remove orphaned temp file %s: %v", path, err) + } else { + log.Debugf("Removed orphaned temp file: %s", path) + cleanedCount++ + } + + return nil + }) + + if err != nil { + log.Errorf("Failed to walk temp directory %s: %v", tempDir, err) + return errors.WithStack(err) + } + + if cleanedCount > 0 { + log.Infof("Cleaned up %d orphaned temp files from %s", cleanedCount, tempDir) + } + + return nil +} diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index d238679f0..74358f021 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -5,6 +5,7 @@ import ( "fmt" "mime/multipart" "os" + "path/filepath" "strings" "sync" "time" @@ -18,6 +19,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/OpenListTeam/OpenList/v4/pkg/tempdir" "github.com/google/uuid" "github.com/pkg/errors" "gorm.io/gorm" @@ -39,12 +41,9 @@ type SliceUploadSession struct { // NewSliceUploadManager 创建分片上传管理器 func NewSliceUploadManager() *SliceUploadManager { - tempDir := os.TempDir() // 默认使用系统临时目录 - if conf.Conf != nil && 
conf.Conf.TempDir != "" { - tempDir = conf.Conf.TempDir - } + tempDirPath := tempdir.GetPersistentTempDir() return &SliceUploadManager{ - tempDir: tempDir, + tempDir: tempDirPath, } } @@ -74,14 +73,40 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D } if su.ID != 0 { // 找到未完成的上传任务,支持断点续传 - session := &SliceUploadSession{SliceUpload: su} - m.cache.Store(su.TaskID, session) - return &reqres.PreupResp{ - TaskID: su.TaskID, - SliceSize: su.SliceSize, - SliceCnt: su.SliceCnt, - SliceUploadStatus: su.SliceUploadStatus, - }, nil + // 验证临时文件是否仍然存在(重启后可能被清理) + if su.TmpFile != "" { + if _, err := os.Stat(su.TmpFile); os.IsNotExist(err) { + // 临时文件丢失,清理数据库记录,重新开始 + log.Warnf("Temporary file lost after restart, cleaning up task: %s", su.TaskID) + if deleteErr := db.DeleteSliceUploadByTaskID(su.TaskID); deleteErr != nil { + log.Errorf("Failed to delete lost slice upload task: %v", deleteErr) + } + // 继续创建新任务 + } else { + // 临时文件存在,可以继续断点续传 + session := &SliceUploadSession{SliceUpload: su} + m.cache.Store(su.TaskID, session) + log.Infof("Resuming slice upload after restart: %s, completed slices: %d/%d", + su.TaskID, tables.CountUploadedSlices(su.SliceUploadStatus), su.SliceCnt) + return &reqres.PreupResp{ + TaskID: su.TaskID, + SliceSize: su.SliceSize, + SliceCnt: su.SliceCnt, + SliceUploadStatus: su.SliceUploadStatus, + }, nil + } + } else { + // 原生分片上传(如123open/baidu),无需临时文件 + session := &SliceUploadSession{SliceUpload: su} + m.cache.Store(su.TaskID, session) + log.Infof("Resuming native slice upload after restart: %s", su.TaskID) + return &reqres.PreupResp{ + TaskID: su.TaskID, + SliceSize: su.SliceSize, + SliceCnt: su.SliceCnt, + SliceUploadStatus: su.SliceUploadStatus, + }, nil + } } srcobj, err := op.Get(ctx, storage, actualPath) @@ -419,30 +444,38 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. 
} } -// ensureTmpFile 确保临时文件存在且正确初始化,线程安全 - 保持原始实现 +// ensureTmpFile 确保临时文件存在且正确初始化,线程安全 - 使用持久化目录 func (s *SliceUploadSession) ensureTmpFile() error { s.mutex.Lock() defer s.mutex.Unlock() if s.TmpFile == "" { - tempDir := os.TempDir() // 默认使用系统临时目录 - if conf.Conf != nil && conf.Conf.TempDir != "" { - tempDir = conf.Conf.TempDir - } - tf, err := os.CreateTemp(tempDir, "file-*") + tempDirPath := tempdir.GetPersistentTempDir() + + // 使用TaskID作为文件名的一部分,确保唯一性和可识别性 + filename := fmt.Sprintf("slice_upload_%s_%s", s.TaskID, s.Name) + // 清理文件名中的特殊字符 + filename = strings.ReplaceAll(filename, "/", "_") + filename = strings.ReplaceAll(filename, "\\", "_") + filename = strings.ReplaceAll(filename, ":", "_") + + tmpPath := filepath.Join(tempDirPath, filename) + + tf, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_RDWR, 0644) if err != nil { - return fmt.Errorf("CreateTemp error: %w", err) + return fmt.Errorf("create persistent temp file error: %w", err) } - abspath := tf.Name() //这里返回的是绝对路径 - if err = os.Truncate(abspath, int64(s.Size)); err != nil { + if err = os.Truncate(tmpPath, int64(s.Size)); err != nil { tf.Close() // 确保文件被关闭 - os.Remove(abspath) // 清理文件 - return fmt.Errorf("Truncate error: %w", err) + os.Remove(tmpPath) // 清理文件 + return fmt.Errorf("truncate persistent temp file error: %w", err) } - s.TmpFile = abspath + s.TmpFile = tmpPath s.tmpFile = tf + + log.Debugf("Created persistent temp file: %s", tmpPath) return nil } @@ -450,8 +483,9 @@ func (s *SliceUploadSession) ensureTmpFile() error { var err error s.tmpFile, err = os.OpenFile(s.TmpFile, os.O_RDWR, 0644) if err != nil { - return fmt.Errorf("OpenFile error: %w", err) + return fmt.Errorf("reopen persistent temp file error: %w", err) } + log.Debugf("Reopened persistent temp file: %s", s.TmpFile) } return nil } diff --git a/internal/model/tables/slice_upload.go b/internal/model/tables/slice_upload.go index f753d6919..05639d454 100644 --- a/internal/model/tables/slice_upload.go +++ 
b/internal/model/tables/slice_upload.go @@ -60,3 +60,14 @@ func IsAllSliceUploaded(status []byte, sliceCnt uint) bool { } return true } + +// CountUploadedSlices 统计已上传的分片数量 +func CountUploadedSlices(status []byte) uint { + count := uint(0) + for i := 0; i < len(status)*8; i++ { + if status[i/8]&(1<<(i%8)) != 0 { + count++ + } + } + return count +} diff --git a/pkg/tempdir/tempdir.go b/pkg/tempdir/tempdir.go new file mode 100644 index 000000000..026e37fca --- /dev/null +++ b/pkg/tempdir/tempdir.go @@ -0,0 +1,35 @@ +package tempdir + +import ( + "os" + "path/filepath" + "github.com/OpenListTeam/OpenList/v4/internal/conf" +) + +// GetPersistentTempDir 获取持久化临时目录 +// 这个函数被多个包共享使用,避免代码重复 +func GetPersistentTempDir() string { + var tempDir string + + // 优先使用配置的临时目录 + if conf.Conf != nil && conf.Conf.TempDir != "" { + tempDir = conf.Conf.TempDir + } else { + // 使用数据目录下的slice_temp子目录 + if conf.Conf != nil && conf.Conf.Database.DBFile != "" { + // 从数据库文件路径推断数据目录 + dataDir := filepath.Dir(conf.Conf.Database.DBFile) + tempDir = filepath.Join(dataDir, "slice_temp") + } else { + // fallback到当前工作目录下的slice_temp + if wd, err := os.Getwd(); err == nil { + tempDir = filepath.Join(wd, "slice_temp") + } else { + // 最后的fallback + tempDir = filepath.Join(os.TempDir(), "openlist_slice_temp") + } + } + } + + return tempDir +} From 24411ec515c612c9869912c269f0ae7dbeaf1658 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 00:38:52 +0800 Subject: [PATCH 12/26] feat(upload): Added the function of resuming unfinished multipart upload tasks, supported streaming uploads and optimized upload logic --- internal/db/slice_upload.go | 17 +++ internal/fs/fs.go | 7 +- internal/fs/sliceup.go | 203 +++++++++++++++++++++++++- internal/model/tables/slice_upload.go | 2 + server/handles/fsup.go | 56 +++---- server/router.go | 2 +- 6 files changed, 247 insertions(+), 40 deletions(-) diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go index 2d9a0c9d7..362791f4e 100644 --- 
a/internal/db/slice_upload.go +++ b/internal/db/slice_upload.go @@ -43,6 +43,23 @@ func DeleteSliceUploadByTaskID(taskID string) error { return errors.WithStack(db.Where("task_id = ?", taskID).Delete(&tables.SliceUpload{}).Error) } +// GetIncompleteSliceUploads 获取所有未完成的分片上传任务(用于重启恢复) +func GetIncompleteSliceUploads() ([]*tables.SliceUpload, error) { + var uploads []*tables.SliceUpload + err := db.Where("status IN (?)", []int{ + tables.SliceUploadStatusWaiting, + tables.SliceUploadStatusUploading, + tables.SliceUploadStatusProxyComplete, + tables.SliceUploadStatusPendingComplete, + }).Find(&uploads).Error + + if err != nil { + return nil, errors.WithStack(err) + } + + return uploads, nil +} + // UpdateSliceUploadWithTx 使用事务更新分片上传状态,确保数据一致性 func UpdateSliceUploadWithTx(su *tables.SliceUpload) error { return errors.WithStack(db.Transaction(func(tx *gorm.DB) error { diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 99e83bc8a..0afc0ad62 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -3,7 +3,6 @@ package fs import ( "context" "io" - "mime/multipart" log "github.com/sirupsen/logrus" @@ -198,9 +197,9 @@ func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.Pr return getGlobalSliceManager().CreateSession(c, s, actualPath, req) } -// UploadSlice 上传切片 - 使用新的管理器重构 -func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { - return getGlobalSliceManager().UploadSlice(ctx, storage, req, file) +// UploadSlice 流式上传切片 - 使用新的管理器重构,支持流式上传 +func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, reader io.Reader) error { + return getGlobalSliceManager().UploadSlice(ctx, storage, req, reader) } // SliceUpComplete 完成分片上传 - 使用新的管理器重构 diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index 74358f021..7bc6b9f48 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -3,7 +3,7 @@ package fs import ( "context" "fmt" - "mime/multipart" + 
"io" "os" "path/filepath" "strings" @@ -42,9 +42,14 @@ type SliceUploadSession struct { // NewSliceUploadManager 创建分片上传管理器 func NewSliceUploadManager() *SliceUploadManager { tempDirPath := tempdir.GetPersistentTempDir() - return &SliceUploadManager{ + manager := &SliceUploadManager{ tempDir: tempDirPath, } + + // 启动时恢复未完成的上传任务 + go manager.recoverIncompleteUploads() + + return manager } // CreateSession 创建新的上传会话 - 完整实现Preup逻辑 @@ -227,8 +232,8 @@ func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSessio return session, nil } -// UploadSlice 上传分片 - 完整实现原始逻辑 -func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, file multipart.File) error { +// UploadSlice 流式上传分片 - 支持流式上传,避免表单上传的内存占用 +func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, reader io.Reader) error { var err error session, err := m.getOrLoadSession(req.TaskID) @@ -286,7 +291,39 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri switch s := storage.(type) { case driver.ISliceUpload: log.Info("SliceUpload support") - if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, file); err != nil { + // 对于支持原生分片上传的驱动,我们需要将流数据缓存到临时文件中 + // 以支持重试和断点续传场景 + if err := session.ensureTmpFile(); err != nil { + log.Error("ensureTmpFile error for native slice upload", req, err) + return err + } + + // 将流数据写入临时文件的指定位置 + sw := &sliceWriter{ + file: session.tmpFile, + offset: int64(req.SliceNum) * int64(session.SliceSize), + } + writtenBytes, err := utils.CopyWithBuffer(sw, reader) + if err != nil { + log.Error("Copy to temp file error for native slice upload", req, err) + return err + } + log.Debugf("Written %d bytes to temp file for slice %d", writtenBytes, req.SliceNum) + + // 从临时文件读取数据进行上传 + sliceSize := session.SliceSize + if req.SliceNum == session.SliceCnt-1 { + // 最后一个分片,计算实际大小 + sliceSize = session.Size - 
int64(req.SliceNum)*int64(session.SliceSize) + } + + sliceReader := &sliceReader{ + file: session.tmpFile, + offset: int64(req.SliceNum) * int64(session.SliceSize), + size: sliceSize, + } + + if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, sliceReader); err != nil { log.Error("SliceUpload error", req, err) return err } @@ -302,7 +339,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri file: session.tmpFile, offset: int64(req.SliceNum) * int64(session.SliceSize), } - _, err := utils.CopyWithBuffer(sw, file) + _, err := utils.CopyWithBuffer(sw, reader) if err != nil { log.Error("Copy error", req, err) return err @@ -336,9 +373,10 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. // 检查是否所有分片都已上传 session.mutex.Lock() allUploaded := tables.IsAllSliceUploaded(session.SliceUploadStatus, session.SliceCnt) + isPendingComplete := session.Status == tables.SliceUploadStatusPendingComplete session.mutex.Unlock() - if !allUploaded { + if !allUploaded && !isPendingComplete { return &reqres.UploadSliceCompleteResp{ Complete: 0, SliceUploadStatus: session.SliceUploadStatus, @@ -346,6 +384,11 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. 
}, nil } + // 如果是PendingComplete状态,说明是重启后恢复的任务,直接尝试完成 + if isPendingComplete { + log.Infof("Processing pending complete task after restart: %s", session.TaskID) + } + defer func() { // 确保资源清理和缓存删除 session.cleanup() @@ -475,6 +518,12 @@ func (s *SliceUploadSession) ensureTmpFile() error { s.TmpFile = tmpPath s.tmpFile = tf + // 更新数据库中的临时文件路径,支持重启后恢复 + if updateErr := db.UpdateSliceUpload(s.SliceUpload); updateErr != nil { + log.Errorf("Failed to update temp file path in database: %v", updateErr) + // 不返回错误,因为文件已经创建成功,只是数据库更新失败 + } + log.Debugf("Created persistent temp file: %s", tmpPath) return nil } @@ -537,3 +586,143 @@ func (sw *sliceWriter) Write(p []byte) (int, error) { sw.offset += int64(n) return n, err } + +// sliceReader 用于从临时文件中读取指定分片的数据,支持断点续传 +type sliceReader struct { + file *os.File + offset int64 + size int64 + position int64 // 当前读取位置(相对于分片开始) +} + +// Read implements io.Reader interface +func (sr *sliceReader) Read(p []byte) (int, error) { + if sr.position >= sr.size { + return 0, io.EOF + } + + // 计算实际可读取的字节数 + remaining := sr.size - sr.position + if int64(len(p)) > remaining { + p = p[:remaining] + } + + n, err := sr.file.ReadAt(p, sr.offset+sr.position) + sr.position += int64(n) + return n, err +} + +// Seek implements io.Seeker interface,支持重试场景 +func (sr *sliceReader) Seek(offset int64, whence int) (int64, error) { + var newPos int64 + switch whence { + case io.SeekStart: + newPos = offset + case io.SeekCurrent: + newPos = sr.position + offset + case io.SeekEnd: + newPos = sr.size + offset + default: + return 0, fmt.Errorf("invalid whence value: %d", whence) + } + + if newPos < 0 { + return 0, fmt.Errorf("negative position: %d", newPos) + } + if newPos > sr.size { + newPos = sr.size + } + + sr.position = newPos + return newPos, nil +} + +// recoverIncompleteUploads 恢复重启后未完成的上传任务 +func (m *SliceUploadManager) recoverIncompleteUploads() { + defer func() { + if r := recover(); r != nil { + log.Errorf("Panic in recoverIncompleteUploads: %v", r) + } 
+ }() + + // 等待一段时间,确保系统完全启动 + time.Sleep(10 * time.Second) + + log.Info("Starting recovery of incomplete slice uploads...") + + // 查询所有未完成的上传任务 + incompleteUploads, err := db.GetIncompleteSliceUploads() + if err != nil { + log.Errorf("Failed to get incomplete slice uploads: %v", err) + return + } + + if len(incompleteUploads) == 0 { + log.Info("No incomplete slice uploads found") + return + } + + log.Infof("Found %d incomplete slice uploads, starting recovery...", len(incompleteUploads)) + + for _, upload := range incompleteUploads { + m.recoverSingleUpload(upload) + } + + log.Info("Slice upload recovery completed") +} + +// recoverSingleUpload 恢复单个上传任务 +func (m *SliceUploadManager) recoverSingleUpload(upload *tables.SliceUpload) { + defer func() { + if r := recover(); r != nil { + log.Errorf("Panic in recoverSingleUpload for task %s: %v", upload.TaskID, r) + } + }() + + log.Infof("Recovering upload task: %s, status: %s", upload.TaskID, upload.Status) + + // 检查是否所有切片都已上传完成 + if tables.IsAllSliceUploaded(upload.SliceUploadStatus, upload.SliceCnt) { + // 所有切片都已完成,尝试完成上传 + log.Infof("All slices completed for task %s, attempting to complete upload", upload.TaskID) + m.attemptCompleteUpload(upload) + return + } + + // 部分切片未完成的情况 + completedSlices := tables.CountUploadedSlices(upload.SliceUploadStatus) + log.Infof("Task %s: %d/%d slices completed, marking as available for resume", + upload.TaskID, completedSlices, upload.SliceCnt) + + // 更新状态为等待用户继续上传 + upload.Status = tables.SliceUploadStatusWaiting + upload.Message = "Ready for resume after server restart" + if err := db.UpdateSliceUpload(upload); err != nil { + log.Errorf("Failed to update slice upload status for task %s: %v", upload.TaskID, err) + } + + // 如果有临时文件但文件不存在,清理记录 + if upload.TmpFile != "" { + if _, err := os.Stat(upload.TmpFile); os.IsNotExist(err) { + log.Warnf("Temporary file lost for task %s, cleaning up", upload.TaskID) + if err := db.DeleteSliceUploadByTaskID(upload.TaskID); err != nil { + 
log.Errorf("Failed to clean up lost task %s: %v", upload.TaskID, err) + } + } + } +} + +// attemptCompleteUpload 尝试完成上传(用于恢复已完成切片的任务) +func (m *SliceUploadManager) attemptCompleteUpload(upload *tables.SliceUpload) { + // 这里需要获取存储驱动,但在恢复阶段我们无法直接获取 storage driver + // 所以我们将状态标记为待完成,等用户下次操作时自动完成 + upload.Status = tables.SliceUploadStatusPendingComplete + upload.Message = "All slices completed, waiting for final completion" + + if err := db.UpdateSliceUpload(upload); err != nil { + log.Errorf("Failed to update slice upload to pending complete for task %s: %v", upload.TaskID, err) + return + } + + log.Infof("Task %s marked as pending completion", upload.TaskID) +} diff --git a/internal/model/tables/slice_upload.go b/internal/model/tables/slice_upload.go index 05639d454..9a66bb28b 100644 --- a/internal/model/tables/slice_upload.go +++ b/internal/model/tables/slice_upload.go @@ -13,6 +13,8 @@ const ( SliceUploadStatusFailed // SliceUploadStatusProxyComplete 成功上传到代理服务,等待上传到网盘 SliceUploadStatusProxyComplete + // SliceUploadStatusPendingComplete 等待完成(所有切片已上传,等待最终完成处理) + SliceUploadStatusPendingComplete ) // SliceUpload 分片上传数据表 diff --git a/server/handles/fsup.go b/server/handles/fsup.go index d99a22d0e..0904de52d 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -18,7 +18,6 @@ import ( "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/OpenListTeam/OpenList/v4/server/common" "github.com/gin-gonic/gin" - log "github.com/sirupsen/logrus" ) func getLastModified(c *gin.Context) time.Time { @@ -260,51 +259,52 @@ func FsPreup(c *gin.Context) { common.SuccessResp(c, res) } -// FsUpSlice 上传分片 +// FsUpSlice 流式上传分片 - 使用PUT方法进行流式上传,避免表单上传的内存占用 func FsUpSlice(c *gin.Context) { - req := &reqres.UploadSliceReq{} - req.SliceHash = c.PostForm("slice_hash") - sn, err := strconv.ParseUint(c.PostForm("slice_num"), 10, 32) - if err != nil { - common.ErrorResp(c, fmt.Errorf("invalid slice_num: %w", err), 400) + // 从HTTP头获取参数 + taskID := c.GetHeader("X-Task-ID") + if 
taskID == "" { + common.ErrorResp(c, fmt.Errorf("X-Task-ID header is required"), 400) return } - req.SliceNum = uint(sn) - req.TaskID = c.PostForm("task_id") - if req.TaskID == "" { - common.ErrorResp(c, fmt.Errorf("task_id is required"), 400) + + sliceNumStr := c.GetHeader("X-Slice-Num") + if sliceNumStr == "" { + common.ErrorResp(c, fmt.Errorf("X-Slice-Num header is required"), 400) return } - - file, err := c.FormFile("slice") + + sliceNum, err := strconv.ParseUint(sliceNumStr, 10, 32) if err != nil { - common.ErrorResp(c, fmt.Errorf("failed to get slice file: %w", err), 400) + common.ErrorResp(c, fmt.Errorf("invalid X-Slice-Num: %w", err), 400) return } - if file.Size == 0 { - common.ErrorResp(c, fmt.Errorf("slice file is empty"), 400) - return + sliceHash := c.GetHeader("X-Slice-Hash") + + // 构建请求对象 + req := &reqres.UploadSliceReq{ + TaskID: taskID, + SliceHash: sliceHash, + SliceNum: uint(sliceNum), } - fd, err := file.Open() - if err != nil { - common.ErrorResp(c, fmt.Errorf("failed to open slice file: %w", err), 500) + // 获取请求体作为流 + reader := c.Request.Body + if reader == nil { + common.ErrorResp(c, fmt.Errorf("request body is required"), 400) return } - defer func() { - if closeErr := fd.Close(); closeErr != nil { - log.Errorf("Failed to close slice file: %v", closeErr) - } - }() - + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) - - err = fs.UploadSlice(c.Request.Context(), storage, req, fd) + + // 调用流式上传分片函数 + err = fs.UploadSlice(c.Request.Context(), storage, req, reader) if err != nil { common.ErrorResp(c, fmt.Errorf("upload slice failed: %w", err), 500) return } + common.SuccessResp(c) } diff --git a/server/router.go b/server/router.go index 754ef2ce3..a88f36cbe 100644 --- a/server/router.go +++ b/server/router.go @@ -208,7 +208,7 @@ func _fs(g *gin.RouterGroup) { g.GET("/upload/info", middlewares.FsSliceUp, handles.FsUpInfo) g.POST("/preup", middlewares.FsSliceUp, handles.FsPreup) - g.POST("/slice_upload", 
middlewares.FsSliceUp, handles.FsUpSlice) + g.PUT("/slice_upload", middlewares.FsSliceUp, handles.FsUpSlice) g.POST("/slice_upload_complete", middlewares.FsSliceUp, handles.FsUpSliceComplete) // g.POST("/add_aria2", handles.AddOfflineDownload) From 05882df403065a81bd594db27625d9c84bec5323 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 6 Sep 2025 09:47:48 +0800 Subject: [PATCH 13/26] =?UTF-8?q?=E7=BB=9F=E4=B8=80=E6=8C=81=E4=B9=85?= =?UTF-8?q?=E5=8C=96=E4=B8=B4=E6=97=B6=E7=9B=AE=E5=BD=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/bootstrap/config.go | 8 ++ internal/bootstrap/task.go | 3 - internal/conf/config.go | 4 + internal/db/slice_upload.go | 66 ++++++++-------- internal/fs/sliceup.go | 105 ++++++++++++-------------- internal/model/tables/base.go | 10 --- internal/model/tables/slice_upload.go | 47 ++++++------ internal/offline_download/tool/add.go | 10 +-- pkg/tempdir/tempdir.go | 35 --------- 9 files changed, 122 insertions(+), 166 deletions(-) delete mode 100644 internal/model/tables/base.go delete mode 100644 pkg/tempdir/tempdir.go diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go index 2209c64f3..4ed8e9145 100644 --- a/internal/bootstrap/config.go +++ b/internal/bootstrap/config.go @@ -125,9 +125,14 @@ func InitConfig() { if err != nil { log.Fatalf("create temp dir error: %+v", err) } + err = os.MkdirAll(conf.GetPersistentTempDir(), 0o777) + if err != nil { + log.Fatalf("create persistent temp dir error: %+v", err) + } log.Debugf("config: %+v", conf.Conf) base.InitClient() initURL() + CleanTempDir() } func confFromEnv() { @@ -160,6 +165,9 @@ func CleanTempDir() { log.Errorln("failed list temp file: ", err) } for _, file := range files { + if file.Name() == "persistent" { + continue + } if err := os.RemoveAll(filepath.Join(conf.Conf.TempDir, file.Name())); err != nil { log.Errorln("failed delete temp file: ", err) } diff --git a/internal/bootstrap/task.go 
b/internal/bootstrap/task.go index 47e0b59eb..5294db76a 100644 --- a/internal/bootstrap/task.go +++ b/internal/bootstrap/task.go @@ -38,9 +38,6 @@ func InitTaskManager() { op.RegisterSettingChangingCallback(func() { tool.TransferTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskOfflineDownloadTransferThreadsNum, conf.Conf.Tasks.Transfer.Workers))) }) - if len(tool.TransferTaskManager.GetAll()) == 0 { //prevent offline downloaded files from being deleted - CleanTempDir() - } fs.ArchiveDownloadTaskManager = tache.NewManager[*fs.ArchiveDownloadTask](tache.WithWorks(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers)), tache.WithPersistFunction(db.GetTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant), db.UpdateTaskDataFunc("decompress", conf.Conf.Tasks.Decompress.TaskPersistant)), tache.WithMaxRetry(conf.Conf.Tasks.Decompress.MaxRetry)) op.RegisterSettingChangingCallback(func() { fs.ArchiveDownloadTaskManager.SetWorkersNumActive(taskFilterNegative(setting.GetInt(conf.TaskDecompressDownloadThreadsNum, conf.Conf.Tasks.Decompress.Workers))) diff --git a/internal/conf/config.go b/internal/conf/config.go index af198e916..08dd3603a 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -245,3 +245,7 @@ func DefaultConfig(dataDir string) *Config { LastLaunchedVersion: "", } } + +func GetPersistentTempDir() string { + return filepath.Join(Conf.TempDir, "persistent") +} diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go index 362791f4e..218937d6c 100644 --- a/internal/db/slice_upload.go +++ b/internal/db/slice_upload.go @@ -6,12 +6,12 @@ import ( "path/filepath" "strings" "time" - + + "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/model/tables" - "github.com/OpenListTeam/OpenList/v4/pkg/tempdir" "github.com/pkg/errors" - "gorm.io/gorm" log "github.com/sirupsen/logrus" + "gorm.io/gorm" ) func CreateSliceUpload(su 
*tables.SliceUpload) error { @@ -52,11 +52,11 @@ func GetIncompleteSliceUploads() ([]*tables.SliceUpload, error) { tables.SliceUploadStatusProxyComplete, tables.SliceUploadStatusPendingComplete, }).Find(&uploads).Error - + if err != nil { return nil, errors.WithStack(err) } - + return uploads, nil } @@ -75,10 +75,10 @@ func UpdateSliceStatusAtomic(taskID string, sliceNum int, status []byte) error { if err := tx.Where("task_id = ?", taskID).First(&su).Error; err != nil { return err } - + // 更新分片状态 tables.SetSliceUploaded(su.SliceUploadStatus, sliceNum) - + // 保存更新 return tx.Save(&su).Error })) @@ -88,15 +88,15 @@ func UpdateSliceStatusAtomic(taskID string, sliceNum int, status []byte) error { func CleanupOrphanedSliceUploads() error { // 清理超过24小时的未完成任务 cutoff := time.Now().Add(-24 * time.Hour) - + var orphanedTasks []tables.SliceUpload - if err := db.Where("status IN (?, ?) AND updated_at < ?", - tables.SliceUploadStatusWaiting, - tables.SliceUploadStatusUploading, + if err := db.Where("status IN (?, ?) 
AND updated_at < ?", + tables.SliceUploadStatusWaiting, + tables.SliceUploadStatusUploading, cutoff).Find(&orphanedTasks).Error; err != nil { return errors.WithStack(err) } - + cleanedCount := 0 for _, task := range orphanedTasks { // 清理临时文件 @@ -107,7 +107,7 @@ func CleanupOrphanedSliceUploads() error { log.Debugf("Removed orphaned tmp file: %s", task.TmpFile) } } - + // 删除数据库记录 if err := db.Delete(&task).Error; err != nil { log.Errorf("Failed to delete orphaned slice upload task %s: %v", task.TaskID, err) @@ -115,11 +115,11 @@ func CleanupOrphanedSliceUploads() error { cleanedCount++ } } - + if cleanedCount > 0 { log.Infof("Cleaned up %d orphaned slice upload tasks", cleanedCount) } - + // 额外清理:扫描临时目录中的孤儿文件 return cleanupOrphanedTempFiles() } @@ -127,22 +127,22 @@ func CleanupOrphanedSliceUploads() error { // cleanupOrphanedTempFiles 清理临时目录中的孤儿文件 func cleanupOrphanedTempFiles() error { // 获取临时目录路径,使用共享的tempdir包 - tempDir := tempdir.GetPersistentTempDir() - + tempDir := conf.GetPersistentTempDir() + // 检查临时目录是否存在 if _, err := os.Stat(tempDir); os.IsNotExist(err) { log.Debugf("Temp directory does not exist: %s", tempDir) return nil } - + // 获取所有活跃的分片上传任务的临时文件列表 var activeTasks []tables.SliceUpload - if err := db.Where("tmp_file IS NOT NULL AND tmp_file != '' AND status IN (?, ?)", - tables.SliceUploadStatusWaiting, + if err := db.Where("tmp_file IS NOT NULL AND tmp_file != '' AND status IN (?, ?)", + tables.SliceUploadStatusWaiting, tables.SliceUploadStatusUploading).Find(&activeTasks).Error; err != nil { return errors.WithStack(err) } - + // 构建活跃文件的映射表 activeFiles := make(map[string]bool) for _, task := range activeTasks { @@ -150,44 +150,44 @@ func cleanupOrphanedTempFiles() error { activeFiles[task.TmpFile] = true } } - + cleanedCount := 0 cutoff := time.Now().Add(-24 * time.Hour) // 只清理超过24小时的文件 - + // 遍历临时目录 err := filepath.WalkDir(tempDir, func(path string, d fs.DirEntry, err error) error { if err != nil { log.Warnf("Failed to access path %s: %v", path, err) 
return nil // 继续处理其他文件 } - + // 跳过目录 if d.IsDir() { return nil } - + // 只处理分片上传临时文件(以slice_upload_开头) if !strings.HasPrefix(d.Name(), "slice_upload_") { return nil } - + // 检查文件是否在活跃任务列表中 if activeFiles[path] { return nil // 文件仍在使用中,跳过 } - + // 检查文件修改时间 info, err := d.Info() if err != nil { log.Warnf("Failed to get file info for %s: %v", path, err) return nil } - + // 只清理超过24小时的文件 if info.ModTime().After(cutoff) { return nil } - + // 删除孤儿文件 if err := os.Remove(path); err != nil { log.Warnf("Failed to remove orphaned temp file %s: %v", path, err) @@ -195,18 +195,18 @@ func cleanupOrphanedTempFiles() error { log.Debugf("Removed orphaned temp file: %s", path) cleanedCount++ } - + return nil }) - + if err != nil { log.Errorf("Failed to walk temp directory %s: %v", tempDir, err) return errors.WithStack(err) } - + if cleanedCount > 0 { log.Infof("Cleaned up %d orphaned temp files from %s", cleanedCount, tempDir) } - + return nil } diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index 7bc6b9f48..1cab688f3 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -19,17 +19,15 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/pkg/utils" - "github.com/OpenListTeam/OpenList/v4/pkg/tempdir" "github.com/google/uuid" "github.com/pkg/errors" - "gorm.io/gorm" log "github.com/sirupsen/logrus" + "gorm.io/gorm" ) // SliceUploadManager 分片上传管理器 type SliceUploadManager struct { - cache sync.Map // TaskID -> *SliceUploadSession - tempDir string // 临时文件目录 + cache sync.Map // TaskID -> *SliceUploadSession } // SliceUploadSession 分片上传会话 @@ -41,14 +39,9 @@ type SliceUploadSession struct { // NewSliceUploadManager 创建分片上传管理器 func NewSliceUploadManager() *SliceUploadManager { - tempDirPath := tempdir.GetPersistentTempDir() - manager := &SliceUploadManager{ - tempDir: tempDirPath, - } - + manager := &SliceUploadManager{} // 启动时恢复未完成的上传任务 go 
manager.recoverIncompleteUploads() - return manager } @@ -77,7 +70,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D return nil, errors.WithStack(err) } - if su.ID != 0 { // 找到未完成的上传任务,支持断点续传 + if su.TaskID != "" { // 找到未完成的上传任务,支持断点续传 // 验证临时文件是否仍然存在(重启后可能被清理) if su.TmpFile != "" { if _, err := os.Stat(su.TmpFile); os.IsNotExist(err) { @@ -91,7 +84,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D // 临时文件存在,可以继续断点续传 session := &SliceUploadSession{SliceUpload: su} m.cache.Store(su.TaskID, session) - log.Infof("Resuming slice upload after restart: %s, completed slices: %d/%d", + log.Infof("Resuming slice upload after restart: %s, completed slices: %d/%d", su.TaskID, tables.CountUploadedSlices(su.SliceUploadStatus), su.SliceCnt) return &reqres.PreupResp{ TaskID: su.TaskID, @@ -113,7 +106,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D }, nil } } - + srcobj, err := op.Get(ctx, storage, actualPath) if err != nil { log.Error(err) @@ -123,7 +116,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D // 生成唯一的TaskID taskID := uuid.New().String() - + //创建新的上传任务 createsu := &tables.SliceUpload{ TaskID: taskID, @@ -142,7 +135,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D createsu.UserID = user.ID } log.Infof("storage mount path %s", storage.GetStorage().MountPath) - + switch st := storage.(type) { case driver.IPreup: log.Info("preup support") @@ -167,7 +160,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D log.Info("Preup not support") createsu.SliceSize = 10 * utils.MB } - + createsu.SliceCnt = uint((req.Size + createsu.SliceSize - 1) / createsu.SliceSize) createsu.SliceUploadStatus = make([]byte, (createsu.SliceCnt+7)/8) createsu.Status = tables.SliceUploadStatusWaiting // 设置初始状态 @@ -177,10 +170,10 @@ func (m *SliceUploadManager) CreateSession(ctx 
context.Context, storage driver.D log.Error("CreateSliceUpload error", createsu, err) return nil, errors.WithStack(err) } - + session := &SliceUploadSession{SliceUpload: createsu} m.cache.Store(taskID, session) - + return &reqres.PreupResp{ Reuse: false, SliceUploadStatus: createsu.SliceUploadStatus, @@ -206,7 +199,7 @@ func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSessio m.cache.Store(taskID, session) return session, nil } - + // 缓存中存在,但可能是nil值,需要检查 if sa == nil { // 说明之前存储了nil,需要重新从数据库加载 @@ -221,7 +214,7 @@ func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSessio m.cache.Store(taskID, session) return session, nil } - + session := sa.(*SliceUploadSession) // 刷新数据库状态以确保数据一致性 if freshSu, err := db.GetSliceUploadByTaskID(taskID); err == nil { @@ -241,7 +234,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri log.Errorf("failed to get session: %+v", err) return err } - + // 确保并发安全的错误处理 defer func() { if err != nil { @@ -250,7 +243,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri session.Message = err.Error() updateData := *session.SliceUpload // 复制数据避免锁持有时间过长 session.mutex.Unlock() - + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { log.Errorf("Failed to update slice upload status: %v", updateErr) } @@ -316,13 +309,13 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri // 最后一个分片,计算实际大小 sliceSize = session.Size - int64(req.SliceNum)*int64(session.SliceSize) } - + sliceReader := &sliceReader{ file: session.tmpFile, offset: int64(req.SliceNum) * int64(session.SliceSize), size: sliceSize, } - + if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, sliceReader); err != nil { log.Error("SliceUpload error", req, err) return err @@ -345,7 +338,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri return err } } - + // 原子性更新分片状态 session.mutex.Lock() 
tables.SetSliceUploaded(session.SliceUploadStatus, int(req.SliceNum)) @@ -369,13 +362,13 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. log.Errorf("failed to get session: %+v", err) return nil, err } - + // 检查是否所有分片都已上传 session.mutex.Lock() allUploaded := tables.IsAllSliceUploaded(session.SliceUploadStatus, session.SliceCnt) isPendingComplete := session.Status == tables.SliceUploadStatusPendingComplete session.mutex.Unlock() - + if !allUploaded && !isPendingComplete { return &reqres.UploadSliceCompleteResp{ Complete: 0, @@ -393,14 +386,14 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. // 确保资源清理和缓存删除 session.cleanup() m.cache.Delete(session.TaskID) - + if err != nil { session.mutex.Lock() session.Status = tables.SliceUploadStatusFailed session.Message = err.Error() updateData := *session.SliceUpload session.mutex.Unlock() - + if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { log.Errorf("Failed to update slice upload status: %v", updateErr) } @@ -411,7 +404,7 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. } } }() - + switch s := storage.(type) { case driver.IUploadSliceComplete: err = s.UploadSliceComplete(ctx, session.SliceUpload) @@ -419,7 +412,7 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. log.Error("UploadSliceComplete error", session.SliceUpload, err) return nil, err } - + // 原生分片上传成功,直接返回,defer中会删除数据库记录 return &reqres.UploadSliceCompleteResp{ Complete: 1, @@ -431,13 +424,13 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. 
session.mutex.Lock() tmpFile := session.tmpFile session.mutex.Unlock() - + if tmpFile == nil { err := fmt.Errorf("tmp file not found [%s]", taskID) log.Error(err) return nil, err } - + var hashInfo utils.HashInfo if session.HashMd5 != "" { hashInfo = utils.NewHashInfo(utils.MD5, session.HashMd5) @@ -454,7 +447,7 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. }, } file.Mimetype = utils.GetMimeType(session.Name) - + if session.AsTask { file.SetTmpFile(tmpFile) // 防止defer中清理文件 @@ -462,7 +455,7 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. session.tmpFile = nil session.TmpFile = "" session.mutex.Unlock() - + _, err = putAsTask(ctx, session.DstPath, file) if err != nil { log.Error("putAsTask error", session.SliceUpload, err) @@ -473,7 +466,7 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. TaskID: session.TaskID, }, nil } - + file.Reader = tmpFile err = op.Put(ctx, storage, session.ActualPath, file, nil) if err != nil { @@ -491,43 +484,41 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. 
func (s *SliceUploadSession) ensureTmpFile() error { s.mutex.Lock() defer s.mutex.Unlock() - + if s.TmpFile == "" { - tempDirPath := tempdir.GetPersistentTempDir() - // 使用TaskID作为文件名的一部分,确保唯一性和可识别性 filename := fmt.Sprintf("slice_upload_%s_%s", s.TaskID, s.Name) // 清理文件名中的特殊字符 filename = strings.ReplaceAll(filename, "/", "_") filename = strings.ReplaceAll(filename, "\\", "_") filename = strings.ReplaceAll(filename, ":", "_") - - tmpPath := filepath.Join(tempDirPath, filename) - + + tmpPath := filepath.Join(conf.GetPersistentTempDir(), filename) + tf, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_RDWR, 0644) if err != nil { return fmt.Errorf("create persistent temp file error: %w", err) } - + if err = os.Truncate(tmpPath, int64(s.Size)); err != nil { - tf.Close() // 确保文件被关闭 + tf.Close() // 确保文件被关闭 os.Remove(tmpPath) // 清理文件 return fmt.Errorf("truncate persistent temp file error: %w", err) } - + s.TmpFile = tmpPath s.tmpFile = tf - + // 更新数据库中的临时文件路径,支持重启后恢复 if updateErr := db.UpdateSliceUpload(s.SliceUpload); updateErr != nil { log.Errorf("Failed to update temp file path in database: %v", updateErr) // 不返回错误,因为文件已经创建成功,只是数据库更新失败 } - + log.Debugf("Created persistent temp file: %s", tmpPath) return nil } - + if s.tmpFile == nil { var err error s.tmpFile, err = os.OpenFile(s.TmpFile, os.O_RDWR, 0644) @@ -543,14 +534,14 @@ func (s *SliceUploadSession) ensureTmpFile() error { func (s *SliceUploadSession) cleanup() { s.mutex.Lock() defer s.mutex.Unlock() - + if s.tmpFile != nil { if closeErr := s.tmpFile.Close(); closeErr != nil { log.Errorf("Failed to close tmp file: %v", closeErr) } s.tmpFile = nil } - + if s.TmpFile != "" { if removeErr := os.Remove(s.TmpFile); removeErr != nil && !os.IsNotExist(removeErr) { log.Errorf("Failed to remove tmp file %s: %v", s.TmpFile, removeErr) @@ -600,13 +591,13 @@ func (sr *sliceReader) Read(p []byte) (int, error) { if sr.position >= sr.size { return 0, io.EOF } - + // 计算实际可读取的字节数 remaining := sr.size - sr.position if int64(len(p)) > 
remaining { p = p[:remaining] } - + n, err := sr.file.ReadAt(p, sr.offset+sr.position) sr.position += int64(n) return n, err @@ -625,14 +616,14 @@ func (sr *sliceReader) Seek(offset int64, whence int) (int64, error) { default: return 0, fmt.Errorf("invalid whence value: %d", whence) } - + if newPos < 0 { return 0, fmt.Errorf("negative position: %d", newPos) } if newPos > sr.size { newPos = sr.size } - + sr.position = newPos return newPos, nil } @@ -691,7 +682,7 @@ func (m *SliceUploadManager) recoverSingleUpload(upload *tables.SliceUpload) { // 部分切片未完成的情况 completedSlices := tables.CountUploadedSlices(upload.SliceUploadStatus) - log.Infof("Task %s: %d/%d slices completed, marking as available for resume", + log.Infof("Task %s: %d/%d slices completed, marking as available for resume", upload.TaskID, completedSlices, upload.SliceCnt) // 更新状态为等待用户继续上传 @@ -718,11 +709,11 @@ func (m *SliceUploadManager) attemptCompleteUpload(upload *tables.SliceUpload) { // 所以我们将状态标记为待完成,等用户下次操作时自动完成 upload.Status = tables.SliceUploadStatusPendingComplete upload.Message = "All slices completed, waiting for final completion" - + if err := db.UpdateSliceUpload(upload); err != nil { log.Errorf("Failed to update slice upload to pending complete for task %s: %v", upload.TaskID, err) return } - + log.Infof("Task %s marked as pending completion", upload.TaskID) } diff --git a/internal/model/tables/base.go b/internal/model/tables/base.go deleted file mode 100644 index 4debf2285..000000000 --- a/internal/model/tables/base.go +++ /dev/null @@ -1,10 +0,0 @@ -package tables - -import "time" - -// Base 表基础字段 -type Base struct { - ID uint `json:"id" gorm:"primaryKey;autoIncrement"` - CreatedAt time.Time `json:"created_at" gorm:"autoCreateTime"` - UpdatedAt time.Time `json:"updated_at" gorm:"autoUpdateTime"` -} diff --git a/internal/model/tables/slice_upload.go b/internal/model/tables/slice_upload.go index 9a66bb28b..b22149af1 100644 --- a/internal/model/tables/slice_upload.go +++ 
b/internal/model/tables/slice_upload.go @@ -1,5 +1,7 @@ package tables +import "time" + const ( //SliceUploadStatusWaiting 等待上传 SliceUploadStatusWaiting = iota @@ -19,28 +21,29 @@ const ( // SliceUpload 分片上传数据表 type SliceUpload struct { - Base - TaskID string `json:"task_id" gorm:"uniqueIndex;type:varchar(36)"` // 任务ID,使用UUID - PreupID string `json:"preup_id"` // 网盘返回的预上传id - SliceSize int64 `json:"slice_size"` // 分片大小,单位:字节 - DstID string `json:"dst_id"` // 目标文件夹ID,部分网盘需要 - DstPath string `json:"dst_path"` // 挂载的父文件夹路径 - ActualPath string `json:"actual_path"` //网盘真实父文件夹路径,不同的网盘,这个值可能相同,比如有相同的目录的两个网盘 - Name string `json:"name"` // 文件名 - Size int64 `json:"size"` // 文件大小 - TmpFile string `json:"tmp_file"` //不支持分片上传的文件临时文件路径 - HashMd5 string `json:"hash_md5"` // md5 - HashMd5256KB string `json:"hash_md5_256kb" gorm:"column:hash_md5_256kb;type:varchar(32)"` // md5256KB - HashSha1 string `json:"hash_sha1"` // sha1 - SliceHash string `json:"slice_hash"` // 分片hash - SliceCnt uint `json:"slice_cnt"` // 分片数量 - SliceUploadStatus []byte `json:"slice_upload_status"` //分片上传状态,对应位置1表示分片已上传 - Server string `json:"server"` // 上传服务器 - Status int `json:"status"` //上传状态 - Message string `json:"message"` // 失败错误信息 - Overwrite bool `json:"overwrite"` // 是否覆盖同名文件 - UserID uint `json:"user_id"` //用户id - AsTask bool `json:"as_task"` + TaskID string `json:"task_id" gorm:"primaryKey;type:varchar(36)"` // 任务ID,使用UUID + CreatedAt time.Time `json:"created_at" gorm:"autoCreateTime"` + UpdatedAt time.Time `json:"updated_at" gorm:"autoUpdateTime"` + PreupID string `json:"preup_id"` // 网盘返回的预上传id + SliceSize int64 `json:"slice_size"` // 分片大小,单位:字节 + DstID string `json:"dst_id"` // 目标文件夹ID,部分网盘需要 + DstPath string `json:"dst_path"` // 挂载的父文件夹路径 + ActualPath string `json:"actual_path"` //网盘真实父文件夹路径,不同的网盘,这个值可能相同,比如有相同的目录的两个网盘 + Name string `json:"name"` // 文件名 + Size int64 `json:"size"` // 文件大小 + TmpFile string `json:"tmp_file"` //不支持分片上传的文件临时文件路径 + HashMd5 string `json:"hash_md5"` // md5 + 
HashMd5256KB string `json:"hash_md5_256kb" gorm:"column:hash_md5_256kb;type:varchar(32)"` // md5256KB + HashSha1 string `json:"hash_sha1"` // sha1 + SliceHash string `json:"slice_hash"` // 分片hash + SliceCnt uint `json:"slice_cnt"` // 分片数量 + SliceUploadStatus []byte `json:"slice_upload_status"` //分片上传状态,对应位置1表示分片已上传 + Server string `json:"server"` // 上传服务器 + Status int `json:"status"` //上传状态 + Message string `json:"message"` // 失败错误信息 + Overwrite bool `json:"overwrite"` // 是否覆盖同名文件 + UserID uint `json:"user_id"` //用户id + AsTask bool `json:"as_task"` } // IsSliceUploaded 判断第i个分片是否已上传 diff --git a/internal/offline_download/tool/add.go b/internal/offline_download/tool/add.go index aea88e2a4..50b95bac3 100644 --- a/internal/offline_download/tool/add.go +++ b/internal/offline_download/tool/add.go @@ -2,18 +2,15 @@ package tool import ( "context" - "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser" - - _115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open" - "github.com/OpenListTeam/OpenList/v4/server/common" - "net/url" stdpath "path" "path/filepath" _115 "github.com/OpenListTeam/OpenList/v4/drivers/115" + _115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open" "github.com/OpenListTeam/OpenList/v4/drivers/pikpak" "github.com/OpenListTeam/OpenList/v4/drivers/thunder" + "github.com/OpenListTeam/OpenList/v4/drivers/thunder_browser" "github.com/OpenListTeam/OpenList/v4/drivers/thunderx" "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/errs" @@ -22,6 +19,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/setting" "github.com/OpenListTeam/OpenList/v4/internal/task" + "github.com/OpenListTeam/OpenList/v4/server/common" "github.com/google/uuid" "github.com/pkg/errors" ) @@ -87,7 +85,7 @@ func AddURL(ctx context.Context, args *AddURLArgs) (task.TaskExtensionInfo, erro } uid := uuid.NewString() - tempDir := filepath.Join(conf.Conf.TempDir, 
args.Tool, uid) + tempDir := filepath.Join(conf.GetPersistentTempDir(), args.Tool, uid) deletePolicy := args.DeletePolicy // 如果当前 storage 是对应网盘,则直接下载到目标路径,无需转存 diff --git a/pkg/tempdir/tempdir.go b/pkg/tempdir/tempdir.go deleted file mode 100644 index 026e37fca..000000000 --- a/pkg/tempdir/tempdir.go +++ /dev/null @@ -1,35 +0,0 @@ -package tempdir - -import ( - "os" - "path/filepath" - "github.com/OpenListTeam/OpenList/v4/internal/conf" -) - -// GetPersistentTempDir 获取持久化临时目录 -// 这个函数被多个包共享使用,避免代码重复 -func GetPersistentTempDir() string { - var tempDir string - - // 优先使用配置的临时目录 - if conf.Conf != nil && conf.Conf.TempDir != "" { - tempDir = conf.Conf.TempDir - } else { - // 使用数据目录下的slice_temp子目录 - if conf.Conf != nil && conf.Conf.Database.DBFile != "" { - // 从数据库文件路径推断数据目录 - dataDir := filepath.Dir(conf.Conf.Database.DBFile) - tempDir = filepath.Join(dataDir, "slice_temp") - } else { - // fallback到当前工作目录下的slice_temp - if wd, err := os.Getwd(); err == nil { - tempDir = filepath.Join(wd, "slice_temp") - } else { - // 最后的fallback - tempDir = filepath.Join(os.TempDir(), "openlist_slice_temp") - } - } - } - - return tempDir -} From dc501f02a07ad300c382a160402e5f1d36a1109b Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 6 Sep 2025 12:04:20 +0800 Subject: [PATCH 14/26] refactor(upload): Optimize SliceUploadManager with singleflight for session management and improve session loading logic --- internal/fs/sliceup.go | 137 +++++------------------------------------ 1 file changed, 15 insertions(+), 122 deletions(-) diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index 1cab688f3..aac07769f 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -18,6 +18,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/model/tables" "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/internal/stream" + "github.com/OpenListTeam/OpenList/v4/pkg/singleflight" "github.com/OpenListTeam/OpenList/v4/pkg/utils" 
"github.com/google/uuid" "github.com/pkg/errors" @@ -27,7 +28,8 @@ import ( // SliceUploadManager 分片上传管理器 type SliceUploadManager struct { - cache sync.Map // TaskID -> *SliceUploadSession + sessionG singleflight.Group[*SliceUploadSession] + cache sync.Map // TaskID -> *SliceUploadSession } // SliceUploadSession 分片上传会话 @@ -112,7 +114,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D log.Error(err) return nil, errors.WithStack(err) } - user, _ := ctx.Value(conf.UserKey).(*model.User) + user := ctx.Value(conf.UserKey).(*model.User) // 生成唯一的TaskID taskID := uuid.New().String() @@ -130,9 +132,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D Overwrite: req.Overwrite, ActualPath: actualPath, AsTask: req.AsTask, - } - if user != nil { - createsu.UserID = user.ID + UserID: user.ID, } log.Infof("storage mount path %s", storage.GetStorage().MountPath) @@ -185,50 +185,26 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D // getOrLoadSession 获取或加载会话,提高代码复用性 func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSession, error) { - sa, loaded := m.cache.LoadOrStore(taskID, (*SliceUploadSession)(nil)) - if !loaded { - // 首次加载,需要从数据库获取 - su, err := db.GetSliceUploadByTaskID(taskID) - if err != nil { - m.cache.Delete(taskID) // 清理无效的 key - return nil, errors.WithMessagef(err, "failed get slice upload [%s]", taskID) - } - session := &SliceUploadSession{ - SliceUpload: su, + session, err, _ := m.sessionG.Do(taskID, func() (*SliceUploadSession, error) { + if s, ok := m.cache.Load(taskID); ok { + return s.(*SliceUploadSession), nil } - m.cache.Store(taskID, session) - return session, nil - } - - // 缓存中存在,但可能是nil值,需要检查 - if sa == nil { - // 说明之前存储了nil,需要重新从数据库加载 + // 首次加载,需要从数据库获取 su, err := db.GetSliceUploadByTaskID(taskID) if err != nil { - m.cache.Delete(taskID) return nil, errors.WithMessagef(err, "failed get slice upload [%s]", taskID) } - session := 
&SliceUploadSession{ + s := &SliceUploadSession{ SliceUpload: su, } - m.cache.Store(taskID, session) - return session, nil - } - - session := sa.(*SliceUploadSession) - // 刷新数据库状态以确保数据一致性 - if freshSu, err := db.GetSliceUploadByTaskID(taskID); err == nil { - session.mutex.Lock() - session.SliceUpload = freshSu - session.mutex.Unlock() - } - return session, nil + m.cache.Store(taskID, s) + return s, nil + }) + return session, err } // UploadSlice 流式上传分片 - 支持流式上传,避免表单上传的内存占用 func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, reader io.Reader) error { - var err error - session, err := m.getOrLoadSession(req.TaskID) if err != nil { log.Errorf("failed to get session: %+v", err) @@ -283,40 +259,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri // 根据存储类型处理分片上传 switch s := storage.(type) { case driver.ISliceUpload: - log.Info("SliceUpload support") - // 对于支持原生分片上传的驱动,我们需要将流数据缓存到临时文件中 - // 以支持重试和断点续传场景 - if err := session.ensureTmpFile(); err != nil { - log.Error("ensureTmpFile error for native slice upload", req, err) - return err - } - - // 将流数据写入临时文件的指定位置 - sw := &sliceWriter{ - file: session.tmpFile, - offset: int64(req.SliceNum) * int64(session.SliceSize), - } - writtenBytes, err := utils.CopyWithBuffer(sw, reader) - if err != nil { - log.Error("Copy to temp file error for native slice upload", req, err) - return err - } - log.Debugf("Written %d bytes to temp file for slice %d", writtenBytes, req.SliceNum) - - // 从临时文件读取数据进行上传 - sliceSize := session.SliceSize - if req.SliceNum == session.SliceCnt-1 { - // 最后一个分片,计算实际大小 - sliceSize = session.Size - int64(req.SliceNum)*int64(session.SliceSize) - } - - sliceReader := &sliceReader{ - file: session.tmpFile, - offset: int64(req.SliceNum) * int64(session.SliceSize), - size: sliceSize, - } - - if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, sliceReader); err != nil { + if err := s.SliceUpload(ctx, 
session.SliceUpload, req.SliceNum, reader); err != nil { log.Error("SliceUpload error", req, err) return err } @@ -578,56 +521,6 @@ func (sw *sliceWriter) Write(p []byte) (int, error) { return n, err } -// sliceReader 用于从临时文件中读取指定分片的数据,支持断点续传 -type sliceReader struct { - file *os.File - offset int64 - size int64 - position int64 // 当前读取位置(相对于分片开始) -} - -// Read implements io.Reader interface -func (sr *sliceReader) Read(p []byte) (int, error) { - if sr.position >= sr.size { - return 0, io.EOF - } - - // 计算实际可读取的字节数 - remaining := sr.size - sr.position - if int64(len(p)) > remaining { - p = p[:remaining] - } - - n, err := sr.file.ReadAt(p, sr.offset+sr.position) - sr.position += int64(n) - return n, err -} - -// Seek implements io.Seeker interface,支持重试场景 -func (sr *sliceReader) Seek(offset int64, whence int) (int64, error) { - var newPos int64 - switch whence { - case io.SeekStart: - newPos = offset - case io.SeekCurrent: - newPos = sr.position + offset - case io.SeekEnd: - newPos = sr.size + offset - default: - return 0, fmt.Errorf("invalid whence value: %d", whence) - } - - if newPos < 0 { - return 0, fmt.Errorf("negative position: %d", newPos) - } - if newPos > sr.size { - newPos = sr.size - } - - sr.position = newPos - return newPos, nil -} - // recoverIncompleteUploads 恢复重启后未完成的上传任务 func (m *SliceUploadManager) recoverIncompleteUploads() { defer func() { From bba51f4a00aa5865898c86565727999aabef1fdd Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 12:38:13 +0800 Subject: [PATCH 15/26] =?UTF-8?q?feat(upload):=20=E6=94=AF=E6=8C=81?= =?UTF-8?q?=E9=87=8D=E5=90=AF=E5=90=8E=E6=81=A2=E5=A4=8D=E4=B8=8A=E4=BC=A0?= =?UTF-8?q?=E4=BB=BB=E5=8A=A1=EF=BC=8C=E4=BC=98=E5=8C=96=E4=B8=B4=E6=97=B6?= =?UTF-8?q?=E6=96=87=E4=BB=B6=E9=AA=8C=E8=AF=81=E5=92=8C=E6=97=A5=E5=BF=97?= =?UTF-8?q?=E8=AE=B0=E5=BD=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/fs/sliceup.go | 28 +++++++++++++++++++--------- 1 file 
changed, 19 insertions(+), 9 deletions(-) diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index aac07769f..4f20e4447 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -54,7 +54,10 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D "dst_path": req.Path, "name": req.Name, "size": req.Size, - "status": tables.SliceUploadStatusUploading, // 只查找正在进行中的任务 + "status": []int{ + tables.SliceUploadStatusWaiting, // 等待状态(重启后恢复) + tables.SliceUploadStatusUploading, // 上传中状态 + }, } if req.Hash.Md5 != "" { wh["hash_md5"] = req.Hash.Md5 @@ -73,7 +76,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D } if su.TaskID != "" { // 找到未完成的上传任务,支持断点续传 - // 验证临时文件是否仍然存在(重启后可能被清理) + // 验证临时文件是否仍然存在(仅对非原生分片上传) if su.TmpFile != "" { if _, err := os.Stat(su.TmpFile); os.IsNotExist(err) { // 临时文件丢失,清理数据库记录,重新开始 @@ -83,11 +86,12 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D } // 继续创建新任务 } else { - // 临时文件存在,可以继续断点续传 + // Temporary file exists, can continue resumable upload (traditional upload mode) session := &SliceUploadSession{SliceUpload: su} m.cache.Store(su.TaskID, session) - log.Infof("Resuming slice upload after restart: %s, completed slices: %d/%d", - su.TaskID, tables.CountUploadedSlices(su.SliceUploadStatus), su.SliceCnt) + completedSlices := tables.CountUploadedSlices(su.SliceUploadStatus) + log.Infof("Resuming file-based slice upload: %s, completed: %d/%d", + su.TaskID, completedSlices, su.SliceCnt) return &reqres.PreupResp{ TaskID: su.TaskID, SliceSize: su.SliceSize, @@ -96,10 +100,12 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D }, nil } } else { - // 原生分片上传(如123open/baidu),无需临时文件 + // Native slice upload, relying on frontend intelligent retry and state sync session := &SliceUploadSession{SliceUpload: su} m.cache.Store(su.TaskID, session) - log.Infof("Resuming native slice upload after restart: %s", su.TaskID) 
+ completedSlices := tables.CountUploadedSlices(su.SliceUploadStatus) + log.Infof("Resuming native slice upload: %s, completed: %d/%d, relying on frontend sync", + su.TaskID, completedSlices, su.SliceCnt) return &reqres.PreupResp{ TaskID: su.TaskID, SliceSize: su.SliceSize, @@ -259,10 +265,14 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri // 根据存储类型处理分片上传 switch s := storage.(type) { case driver.ISliceUpload: + // Native slice upload: directly pass stream data, let frontend handle retry and recovery if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, reader); err != nil { - log.Error("SliceUpload error", req, err) - return err + log.Errorf("Native slice upload failed - TaskID: %s, SliceNum: %d, Error: %v", + req.TaskID, req.SliceNum, err) + return errors.WithMessagef(err, "slice %d upload failed", req.SliceNum) } + log.Debugf("Native slice upload success - TaskID: %s, SliceNum: %d", + req.TaskID, req.SliceNum) default: //其他网盘先缓存到本地 if err := session.ensureTmpFile(); err != nil { From e36541736b8be5a6140d7230620f1aad1fdbe931 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 6 Sep 2025 14:38:22 +0800 Subject: [PATCH 16/26] =?UTF-8?q?=E4=B8=8A=E4=BC=A0=E6=97=B6=E6=A3=80?= =?UTF-8?q?=E6=9F=A5=E6=98=AF=E5=90=A6=E8=A6=86=E7=9B=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- server/handles/fsup.go | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 0904de52d..018a9f98f 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -237,7 +237,7 @@ func FsPreup(c *gin.Context) { common.ErrorResp(c, fmt.Errorf("invalid request body: %w", err), 400) return } - + // 基本参数验证 if req.Name == "" { common.ErrorResp(c, fmt.Errorf("file name is required"), 400) @@ -247,9 +247,15 @@ func FsPreup(c *gin.Context) { common.ErrorResp(c, fmt.Errorf("file size must be greater than 
0"), 400) return } - + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) path := c.Request.Context().Value(conf.PathKey).(string) + if !req.Overwrite { + if res, _ := fs.Get(c.Request.Context(), path, &fs.GetArgs{NoLog: true}); res != nil { + common.ErrorStrResp(c, "file exists", 403) + return + } + } res, err := fs.Preup(c.Request.Context(), storage, path, req) if err != nil { @@ -261,50 +267,56 @@ func FsPreup(c *gin.Context) { // FsUpSlice 流式上传分片 - 使用PUT方法进行流式上传,避免表单上传的内存占用 func FsUpSlice(c *gin.Context) { + defer func() { + if n, _ := io.ReadFull(c.Request.Body, []byte{0}); n == 1 { + _, _ = utils.CopyWithBuffer(io.Discard, c.Request.Body) + } + _ = c.Request.Body.Close() + }() // 从HTTP头获取参数 taskID := c.GetHeader("X-Task-ID") if taskID == "" { common.ErrorResp(c, fmt.Errorf("X-Task-ID header is required"), 400) return } - + sliceNumStr := c.GetHeader("X-Slice-Num") if sliceNumStr == "" { common.ErrorResp(c, fmt.Errorf("X-Slice-Num header is required"), 400) return } - + sliceNum, err := strconv.ParseUint(sliceNumStr, 10, 32) if err != nil { common.ErrorResp(c, fmt.Errorf("invalid X-Slice-Num: %w", err), 400) return } - + sliceHash := c.GetHeader("X-Slice-Hash") - + // 构建请求对象 req := &reqres.UploadSliceReq{ TaskID: taskID, SliceHash: sliceHash, SliceNum: uint(sliceNum), } - + // 获取请求体作为流 reader := c.Request.Body if reader == nil { common.ErrorResp(c, fmt.Errorf("request body is required"), 400) return } - + storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) - + // 调用流式上传分片函数 err = fs.UploadSlice(c.Request.Context(), storage, req, reader) if err != nil { common.ErrorResp(c, fmt.Errorf("upload slice failed: %w", err), 500) return } - + common.SuccessResp(c) } @@ -316,12 +328,12 @@ func FsUpSliceComplete(c *gin.Context) { common.ErrorResp(c, fmt.Errorf("invalid request body: %w", err), 400) return } - + if req.TaskID == "" { common.ErrorResp(c, fmt.Errorf("task_id is required"), 400) return } - + storage := 
c.Request.Context().Value(conf.StorageKey).(driver.Driver) rsp, err := fs.SliceUpComplete(c.Request.Context(), storage, req.TaskID) if err != nil { From e68f04e70e6beb141be7baac17a25509a0cf771f Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 14:49:09 +0800 Subject: [PATCH 17/26] =?UTF-8?q?fix(upload):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E5=88=86=E7=89=87=E4=B8=8A=E4=BC=A0=E6=97=B6hash=E5=80=BC?= =?UTF-8?q?=E9=AA=8C=E8=AF=81=E9=80=BB=E8=BE=91=EF=BC=8C=E7=A1=AE=E4=BF=9D?= =?UTF-8?q?=E5=AE=8C=E6=95=B4hash=E5=88=97=E8=A1=A8=E4=B8=8D=E8=A2=AB?= =?UTF-8?q?=E8=A6=86=E7=9B=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/fs/sliceup.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index 4f20e4447..5178208b5 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -246,8 +246,8 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri if req.SliceHash != "" { session.mutex.Lock() - //验证分片hash值 - if req.SliceNum == 0 { //第一个分片,slicehash是所有的分片hash + // 验证分片hash值 + if req.SliceNum == 0 { // 第一个分片,slicehash是所有的分片hash hs := strings.Split(req.SliceHash, ",") if len(hs) != int(session.SliceCnt) { session.mutex.Unlock() @@ -257,7 +257,8 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri } session.SliceHash = req.SliceHash // 存储完整的hash字符串 } else { - session.SliceHash = req.SliceHash // 存储单个分片hash + // 非第0个分片,不覆盖 SliceHash,保持完整的hash列表 + log.Debugf("Slice %d hash: %s (keeping complete hash list)", req.SliceNum, req.SliceHash) } session.mutex.Unlock() } From 34c2b7deb2c69db98213174b03808d9dc4dcc980 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 15:22:28 +0800 Subject: [PATCH 18/26] =?UTF-8?q?fix(upload):=20=E4=BF=AE=E5=A4=8DFsPreup?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E4=B8=AD=E7=9A=84=E6=96=87=E4=BB=B6=E5=AD=98?= 
=?UTF-8?q?=E5=9C=A8=E6=A3=80=E6=9F=A5=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- server/handles/fsup.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 018a9f98f..690274b4d 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -250,11 +250,10 @@ func FsPreup(c *gin.Context) { storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) path := c.Request.Context().Value(conf.PathKey).(string) - if !req.Overwrite { - if res, _ := fs.Get(c.Request.Context(), path, &fs.GetArgs{NoLog: true}); res != nil { - common.ErrorStrResp(c, "file exists", 403) - return - } + fullPath := utils.FixAndCleanPath(stdpath.Join(path, req.Name)) + if res, _ := fs.Get(c.Request.Context(), fullPath, &fs.GetArgs{NoLog: true}); res != nil && !req.Overwrite { + common.ErrorStrResp(c, "file exists", 403) + return } res, err := fs.Preup(c.Request.Context(), storage, path, req) From bfdf98f7d33f176a45ad878608ff16279c50a874 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 6 Sep 2025 15:39:19 +0800 Subject: [PATCH 19/26] =?UTF-8?q?fix(upload):=20=E4=BF=AE=E5=A4=8DFsPreup?= =?UTF-8?q?=E5=87=BD=E6=95=B0=E4=B8=AD=E7=9A=84=E6=96=87=E4=BB=B6=E5=AD=98?= =?UTF-8?q?=E5=9C=A8=E6=A3=80=E6=9F=A5=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- server/handles/fsup.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 018a9f98f..a591fb4b1 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -251,7 +251,8 @@ func FsPreup(c *gin.Context) { storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) path := c.Request.Context().Value(conf.PathKey).(string) if !req.Overwrite { - if res, _ := fs.Get(c.Request.Context(), path, &fs.GetArgs{NoLog: true}); res != nil { + fullPath := 
utils.FixAndCleanPath(stdpath.Join(path, req.Name)) + if res, _ := fs.Get(c.Request.Context(), fullPath, &fs.GetArgs{NoLog: true}); res != nil { common.ErrorStrResp(c, "file exists", 403) return } From 9c8f70fccbd54f48db472105bf8e6ed2cce3987e Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 16:22:19 +0800 Subject: [PATCH 20/26] =?UTF-8?q?fix(upload):=20=E7=B2=BE=E7=AE=80CreateSe?= =?UTF-8?q?ssion=E5=92=8CCompleteUpload=E5=87=BD=E6=95=B0=EF=BC=8C?= =?UTF-8?q?=E7=A7=BB=E9=99=A4=E5=86=97=E4=BD=99=E9=80=BB=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/fs/sliceup.go | 84 +++--------------------------------------- 1 file changed, 5 insertions(+), 79 deletions(-) diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index 6cd539a12..1b7b31177 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -23,7 +23,6 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" log "github.com/sirupsen/logrus" - "gorm.io/gorm" ) // SliceUploadManager 分片上传管理器 @@ -36,85 +35,18 @@ type SliceUploadManager struct { type SliceUploadSession struct { *tables.SliceUpload tmpFile *os.File - mutex sync.Mutex // 使用Mutex而不是RWMutex,保持与原始实现一致 + mutex sync.Mutex } // NewSliceUploadManager 创建分片上传管理器 func NewSliceUploadManager() *SliceUploadManager { manager := &SliceUploadManager{} - // 系统重启后清理未完成的上传任务,因为前端session会失效 go manager.cleanupIncompleteUploads() return manager } -// CreateSession 创建新的上传会话 - 完整实现Preup逻辑 +// CreateSession 创建新的上传会话 func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { - // 检查是否存在未完成的上传任务(用于断点续传) - wh := map[string]any{ - "dst_path": req.Path, - "name": req.Name, - "size": req.Size, - "status": []int{ - tables.SliceUploadStatusWaiting, // 等待状态(重启后恢复) - tables.SliceUploadStatusUploading, // 上传中状态 - }, - } - if req.Hash.Md5 != "" { - wh["hash_md5"] = req.Hash.Md5 - } - if 
req.Hash.Sha1 != "" { - wh["hash_sha1"] = req.Hash.Sha1 - } - if req.Hash.Md5256KB != "" { - wh["hash_md5_256kb"] = req.Hash.Md5256KB - } - - su, err := db.GetSliceUpload(wh) - if err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - log.Error("GetSliceUpload", err) - return nil, errors.WithStack(err) - } - - if su.TaskID != "" { // 找到未完成的上传任务,支持断点续传 - // 验证临时文件是否仍然存在(仅对非原生分片上传) - if su.TmpFile != "" { - if _, err := os.Stat(su.TmpFile); os.IsNotExist(err) { - // 临时文件丢失,清理数据库记录,重新开始 - log.Warnf("Temporary file lost after restart, cleaning up task: %s", su.TaskID) - if deleteErr := db.DeleteSliceUploadByTaskID(su.TaskID); deleteErr != nil { - log.Errorf("Failed to delete lost slice upload task: %v", deleteErr) - } - // 继续创建新任务 - } else { - // Temporary file exists, can continue resumable upload (traditional upload mode) - session := &SliceUploadSession{SliceUpload: su} - m.cache.Store(su.TaskID, session) - completedSlices := tables.CountUploadedSlices(su.SliceUploadStatus) - log.Infof("Resuming file-based slice upload: %s, completed: %d/%d", - su.TaskID, completedSlices, su.SliceCnt) - return &reqres.PreupResp{ - TaskID: su.TaskID, - SliceSize: su.SliceSize, - SliceCnt: su.SliceCnt, - SliceUploadStatus: su.SliceUploadStatus, - }, nil - } - } else { - // Native slice upload, relying on frontend intelligent retry and state sync - session := &SliceUploadSession{SliceUpload: su} - m.cache.Store(su.TaskID, session) - completedSlices := tables.CountUploadedSlices(su.SliceUploadStatus) - log.Infof("Resuming native slice upload: %s, completed: %d/%d, relying on frontend sync", - su.TaskID, completedSlices, su.SliceCnt) - return &reqres.PreupResp{ - TaskID: su.TaskID, - SliceSize: su.SliceSize, - SliceCnt: su.SliceCnt, - SliceUploadStatus: su.SliceUploadStatus, - }, nil - } - } - srcobj, err := op.Get(ctx, storage, actualPath) if err != nil { log.Error(err) @@ -307,7 +239,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri return nil } 
-// CompleteUpload 完成上传 - 完整实现原始逻辑 +// CompleteUpload 完成上传 func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { var err error @@ -320,10 +252,9 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. // 检查是否所有分片都已上传 session.mutex.Lock() allUploaded := tables.IsAllSliceUploaded(session.SliceUploadStatus, session.SliceCnt) - isPendingComplete := session.Status == tables.SliceUploadStatusPendingComplete session.mutex.Unlock() - if !allUploaded && !isPendingComplete { + if !allUploaded { return &reqres.UploadSliceCompleteResp{ Complete: 0, SliceUploadStatus: session.SliceUploadStatus, @@ -331,11 +262,6 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. }, nil } - // 如果是PendingComplete状态,说明是重启后恢复的任务,直接尝试完成 - if isPendingComplete { - log.Infof("Processing pending complete task after restart: %s", session.TaskID) - } - defer func() { // 确保资源清理和缓存删除 session.cleanup() @@ -463,7 +389,7 @@ func (s *SliceUploadSession) ensureTmpFile() error { s.TmpFile = tmpPath s.tmpFile = tf - // 更新数据库中的临时文件路径,支持重启后恢复 + // 更新数据库中的临时文件路径 if updateErr := db.UpdateSliceUpload(s.SliceUpload); updateErr != nil { log.Errorf("Failed to update temp file path in database: %v", updateErr) // 不返回错误,因为文件已经创建成功,只是数据库更新失败 From f995c48903efa2c502c8c4285f382306423463e7 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 16:44:32 +0800 Subject: [PATCH 21/26] =?UTF-8?q?fix(upload):=20=E5=88=9D=E5=A7=8B?= =?UTF-8?q?=E5=8C=96=E5=88=86=E7=89=87=E4=B8=8A=E4=BC=A0=E7=AE=A1=E7=90=86?= =?UTF-8?q?=E5=99=A8=E5=B9=B6=E4=BC=98=E5=8C=96=E6=B8=85=E7=90=86=E9=80=BB?= =?UTF-8?q?=E8=BE=91?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- cmd/server.go | 1 + internal/fs/sliceup.go | 98 +++++++++++++++++++++++++++++++++--------- 2 files changed, 79 insertions(+), 20 deletions(-) diff --git a/cmd/server.go 
b/cmd/server.go index 3758009f6..b9b7d9118 100644 --- a/cmd/server.go +++ b/cmd/server.go @@ -44,6 +44,7 @@ the address is defined in config file`, bootstrap.InitOfflineDownloadTools() bootstrap.LoadStorages() bootstrap.InitTaskManager() + fs.InitSliceUploadManager() if !flags.Debug && !flags.Dev { gin.SetMode(gin.ReleaseMode) } diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index 1b7b31177..c6ddc7f43 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -434,6 +434,11 @@ func (s *SliceUploadSession) cleanup() { var globalSliceManager *SliceUploadManager var globalSliceManagerOnce sync.Once +func InitSliceUploadManager() { + log.Info("Initializing slice upload manager...") + getGlobalSliceManager() +} + // getGlobalSliceManager 获取全局分片上传管理器(延迟初始化) func getGlobalSliceManager() *SliceUploadManager { globalSliceManagerOnce.Do(func() { @@ -466,36 +471,33 @@ func (m *SliceUploadManager) cleanupIncompleteUploads() { } }() - // 等待一段时间,确保系统完全启动 time.Sleep(10 * time.Second) log.Info("Starting cleanup of incomplete slice uploads after restart...") - // 查询所有未完成的上传任务 incompleteUploads, err := db.GetIncompleteSliceUploads() if err != nil { log.Errorf("Failed to get incomplete slice uploads: %v", err) - return - } - - if len(incompleteUploads) == 0 { - log.Info("No incomplete slice uploads found") - return - } - - log.Infof("Found %d incomplete slice uploads, starting cleanup...", len(incompleteUploads)) - - cleanedCount := 0 - for _, upload := range incompleteUploads { - if m.cleanupSingleUpload(upload) { - cleanedCount++ + } else { + if len(incompleteUploads) == 0 { + log.Info("No incomplete slice uploads found in database") + } else { + log.Infof("Found %d incomplete slice uploads in database, starting cleanup...", len(incompleteUploads)) + cleanedCount := 0 + for _, upload := range incompleteUploads { + if m.cleanupSingleUpload(upload) { + cleanedCount++ + } + } + log.Infof("Database cleanup completed, cleaned up %d tasks", cleanedCount) } } - 
log.Infof("Slice upload cleanup completed, cleaned up %d tasks", cleanedCount) + m.cleanupOrphanedTempFiles() + + log.Info("Slice upload cleanup completed") } -// cleanupSingleUpload 清理单个上传任务 func (m *SliceUploadManager) cleanupSingleUpload(upload *tables.SliceUpload) bool { defer func() { if r := recover(); r != nil { @@ -505,7 +507,6 @@ func (m *SliceUploadManager) cleanupSingleUpload(upload *tables.SliceUpload) boo log.Infof("Cleaning up upload task: %s, status: %s", upload.TaskID, upload.Status) - // 清理临时文件 if upload.TmpFile != "" { if err := os.Remove(upload.TmpFile); err != nil && !os.IsNotExist(err) { log.Warnf("Failed to remove temp file %s for task %s: %v", upload.TmpFile, upload.TaskID, err) @@ -514,7 +515,6 @@ func (m *SliceUploadManager) cleanupSingleUpload(upload *tables.SliceUpload) boo } } - // 从数据库中删除任务记录 if err := db.DeleteSliceUploadByTaskID(upload.TaskID); err != nil { log.Errorf("Failed to delete slice upload task %s: %v", upload.TaskID, err) return false @@ -523,3 +523,61 @@ func (m *SliceUploadManager) cleanupSingleUpload(upload *tables.SliceUpload) boo log.Infof("Successfully cleaned up task: %s", upload.TaskID) return true } + +func (m *SliceUploadManager) cleanupOrphanedTempFiles() { + defer func() { + if r := recover(); r != nil { + log.Errorf("Panic in cleanupOrphanedTempFiles: %v", r) + } + }() + + tempDir := conf.GetPersistentTempDir() + if tempDir == "" { + log.Warn("Persistent temp directory not configured, skipping orphaned file cleanup") + return + } + + log.Infof("Cleaning up orphaned temp files in: %s", tempDir) + + entries, err := os.ReadDir(tempDir) + if err != nil { + log.Errorf("Failed to read temp directory %s: %v", tempDir, err) + return + } + + orphanedCount := 0 + for _, entry := range entries { + if entry.IsDir() { + continue + } + + fileName := entry.Name() + if !strings.HasPrefix(fileName, "slice_upload_") { + continue + } + + filePath := filepath.Join(tempDir, fileName) + fileInfo, err := entry.Info() + if err != nil { 
+ log.Warnf("Failed to get file info for %s: %v", filePath, err) + continue + } + + if time.Since(fileInfo.ModTime()) < 24*time.Hour { + continue + } + + if err := os.Remove(filePath); err != nil { + log.Warnf("Failed to remove orphaned temp file %s: %v", filePath, err) + } else { + log.Debugf("Removed orphaned temp file: %s", filePath) + orphanedCount++ + } + } + + if orphanedCount > 0 { + log.Infof("Cleaned up %d orphaned temp files", orphanedCount) + } else { + log.Info("No orphaned temp files found") + } +} From 9d3d11b382b67d102eff21958d3dc1ab62430a5e Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 17:03:21 +0800 Subject: [PATCH 22/26] =?UTF-8?q?fix(upload):=20=E7=A7=BB=E9=99=A4?= =?UTF-8?q?=E5=86=97=E4=BD=99?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/db/db.go | 3 +-- internal/db/slice_upload.go | 20 +------------------- internal/driver/driver.go | 5 ----- internal/fs/fs.go | 5 ----- internal/fs/sliceup.go | 34 ++++++---------------------------- server/handles/fsup.go | 5 ----- 6 files changed, 8 insertions(+), 64 deletions(-) diff --git a/internal/db/db.go b/internal/db/db.go index faa61d002..172ea6be1 100644 --- a/internal/db/db.go +++ b/internal/db/db.go @@ -17,8 +17,7 @@ func Init(d *gorm.DB) { if err != nil { log.Fatalf("failed migrate database: %s", err.Error()) } - - // 清理启动前遗留的孤儿分片上传任务 + if err := CleanupOrphanedSliceUploads(); err != nil { log.Errorf("Failed to cleanup orphaned slice uploads: %v", err) } diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go index 218937d6c..de0b5f778 100644 --- a/internal/db/slice_upload.go +++ b/internal/db/slice_upload.go @@ -70,23 +70,19 @@ func UpdateSliceUploadWithTx(su *tables.SliceUpload) error { // UpdateSliceStatusAtomic 原子性地更新分片状态 func UpdateSliceStatusAtomic(taskID string, sliceNum int, status []byte) error { return errors.WithStack(db.Transaction(func(tx *gorm.DB) error { - // 先读取当前状态 var su tables.SliceUpload if 
err := tx.Where("task_id = ?", taskID).First(&su).Error; err != nil { return err } - // 更新分片状态 tables.SetSliceUploaded(su.SliceUploadStatus, sliceNum) - // 保存更新 return tx.Save(&su).Error })) } // CleanupOrphanedSliceUploads 清理孤儿分片上传记录(启动时调用) func CleanupOrphanedSliceUploads() error { - // 清理超过24小时的未完成任务 cutoff := time.Now().Add(-24 * time.Hour) var orphanedTasks []tables.SliceUpload @@ -99,7 +95,6 @@ func CleanupOrphanedSliceUploads() error { cleanedCount := 0 for _, task := range orphanedTasks { - // 清理临时文件 if task.TmpFile != "" { if err := os.Remove(task.TmpFile); err != nil && !os.IsNotExist(err) { log.Warnf("Failed to remove orphaned tmp file %s: %v", task.TmpFile, err) @@ -108,7 +103,6 @@ func CleanupOrphanedSliceUploads() error { } } - // 删除数据库记录 if err := db.Delete(&task).Error; err != nil { log.Errorf("Failed to delete orphaned slice upload task %s: %v", task.TaskID, err) } else { @@ -120,22 +114,18 @@ func CleanupOrphanedSliceUploads() error { log.Infof("Cleaned up %d orphaned slice upload tasks", cleanedCount) } - // 额外清理:扫描临时目录中的孤儿文件 return cleanupOrphanedTempFiles() } // cleanupOrphanedTempFiles 清理临时目录中的孤儿文件 func cleanupOrphanedTempFiles() error { - // 获取临时目录路径,使用共享的tempdir包 tempDir := conf.GetPersistentTempDir() - // 检查临时目录是否存在 if _, err := os.Stat(tempDir); os.IsNotExist(err) { log.Debugf("Temp directory does not exist: %s", tempDir) return nil } - // 获取所有活跃的分片上传任务的临时文件列表 var activeTasks []tables.SliceUpload if err := db.Where("tmp_file IS NOT NULL AND tmp_file != '' AND status IN (?, ?)", tables.SliceUploadStatusWaiting, @@ -143,7 +133,6 @@ func cleanupOrphanedTempFiles() error { return errors.WithStack(err) } - // 构建活跃文件的映射表 activeFiles := make(map[string]bool) for _, task := range activeTasks { if task.TmpFile != "" { @@ -152,43 +141,36 @@ func cleanupOrphanedTempFiles() error { } cleanedCount := 0 - cutoff := time.Now().Add(-24 * time.Hour) // 只清理超过24小时的文件 + cutoff := time.Now().Add(-24 * time.Hour) - // 遍历临时目录 err := filepath.WalkDir(tempDir, 
func(path string, d fs.DirEntry, err error) error { if err != nil { log.Warnf("Failed to access path %s: %v", path, err) return nil // 继续处理其他文件 } - // 跳过目录 if d.IsDir() { return nil } - // 只处理分片上传临时文件(以slice_upload_开头) if !strings.HasPrefix(d.Name(), "slice_upload_") { return nil } - // 检查文件是否在活跃任务列表中 if activeFiles[path] { return nil // 文件仍在使用中,跳过 } - // 检查文件修改时间 info, err := d.Info() if err != nil { log.Warnf("Failed to get file info for %s: %v", path, err) return nil } - // 只清理超过24小时的文件 if info.ModTime().After(cutoff) { return nil } - // 删除孤儿文件 if err := os.Remove(path); err != nil { log.Warnf("Failed to remove orphaned temp file %s: %v", path, err) } else { diff --git a/internal/driver/driver.go b/internal/driver/driver.go index d6405fd93..2d0e2b137 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -84,23 +84,18 @@ type Remove interface { Remove(ctx context.Context, obj model.Obj) error } -// IUploadInfo 上传信息接口 type IUploadInfo interface { GetUploadInfo() *model.UploadInfo } -// IPreup 预上传接口 type IPreup interface { Preup(ctx context.Context, srcobj model.Obj, req *reqres.PreupReq) (*model.PreupInfo, error) } -// ISliceUpload 分片上传接口 type ISliceUpload interface { - // SliceUpload 分片上传 SliceUpload(ctx context.Context, req *tables.SliceUpload, sliceno uint, file io.Reader) error } -// IUploadSliceComplete 分片上传完成接口 type IUploadSliceComplete interface { UploadSliceComplete(ctx context.Context, req *tables.SliceUpload) error } diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 0afc0ad62..6079dce90 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -190,19 +190,14 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error { return op.PutURL(ctx, storage, dstDirActualPath, dstName, urlStr) } -/// 分片上传功能-------------------------------------------------------------------- - -// Preup 预上传 - 使用新的管理器重构 func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { return 
getGlobalSliceManager().CreateSession(c, s, actualPath, req) } -// UploadSlice 流式上传切片 - 使用新的管理器重构,支持流式上传 func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, reader io.Reader) error { return getGlobalSliceManager().UploadSlice(ctx, storage, req, reader) } -// SliceUpComplete 完成分片上传 - 使用新的管理器重构 func SliceUpComplete(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { return getGlobalSliceManager().CompleteUpload(ctx, storage, taskID) } diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index c6ddc7f43..e5417c817 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -54,10 +54,8 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D } user := ctx.Value(conf.UserKey).(*model.User) - // 生成唯一的TaskID taskID := uuid.New().String() - //创建新的上传任务 createsu := &tables.SliceUpload{ TaskID: taskID, DstPath: req.Path, @@ -121,13 +119,11 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D }, nil } -// getOrLoadSession 获取或加载会话,提高代码复用性 func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSession, error) { session, err, _ := m.sessionG.Do(taskID, func() (*SliceUploadSession, error) { if s, ok := m.cache.Load(taskID); ok { return s.(*SliceUploadSession), nil } - // 首次加载,需要从数据库获取 su, err := db.GetSliceUploadByTaskID(taskID) if err != nil { return nil, errors.WithMessagef(err, "failed get slice upload [%s]", taskID) @@ -141,7 +137,7 @@ func (m *SliceUploadManager) getOrLoadSession(taskID string) (*SliceUploadSessio return session, err } -// UploadSlice 流式上传分片 - 支持流式上传,避免表单上传的内存占用 +// UploadSlice 流式上传分片 func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, reader io.Reader) error { session, err := m.getOrLoadSession(req.TaskID) if err != nil { @@ -149,13 +145,12 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage 
driver.Dri return err } - // 确保并发安全的错误处理 defer func() { if err != nil { session.mutex.Lock() session.Status = tables.SliceUploadStatusFailed session.Message = err.Error() - updateData := *session.SliceUpload // 复制数据避免锁持有时间过长 + updateData := *session.SliceUpload session.mutex.Unlock() if updateErr := db.UpdateSliceUpload(&updateData); updateErr != nil { @@ -166,7 +161,6 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri // 使用锁保护状态检查 session.mutex.Lock() - // 检查分片是否已上传过 if tables.IsSliceUploaded(session.SliceUploadStatus, int(req.SliceNum)) { session.mutex.Unlock() log.Warnf("slice already uploaded,req:%+v", req) @@ -178,8 +172,7 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri if req.SliceHash != "" { session.mutex.Lock() - // 验证分片hash值 - if req.SliceNum == 0 { // 第一个分片,slicehash是所有的分片hash + if req.SliceNum == 0 { hs := strings.Split(req.SliceHash, ",") if len(hs) != int(session.SliceCnt) { session.mutex.Unlock() @@ -187,18 +180,15 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri log.Error("slice hash count mismatch", req, err) return err } - session.SliceHash = req.SliceHash // 存储完整的hash字符串 + session.SliceHash = req.SliceHash } else { - // 非第0个分片,不覆盖 SliceHash,保持完整的hash列表 log.Debugf("Slice %d hash: %s (keeping complete hash list)", req.SliceNum, req.SliceHash) } session.mutex.Unlock() } - // 根据存储类型处理分片上传 switch s := storage.(type) { case driver.ISliceUpload: - // Native slice upload: directly pass stream data, let frontend handle retry and recovery if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, reader); err != nil { log.Errorf("Native slice upload failed - TaskID: %s, SliceNum: %d, Error: %v", req.TaskID, req.SliceNum, err) @@ -207,13 +197,12 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri log.Debugf("Native slice upload success - TaskID: %s, SliceNum: %d", req.TaskID, req.SliceNum) - default: //其他网盘先缓存到本地 + 
default: if err := session.ensureTmpFile(); err != nil { log.Error("ensureTmpFile error", req, err) return err } - // 流式复制,减少内存占用 sw := &sliceWriter{ file: session.tmpFile, offset: int64(req.SliceNum) * int64(session.SliceSize), @@ -225,10 +214,9 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri } } - // 原子性更新分片状态 session.mutex.Lock() tables.SetSliceUploaded(session.SliceUploadStatus, int(req.SliceNum)) - updateData := *session.SliceUpload // 复制数据 + updateData := *session.SliceUpload session.mutex.Unlock() err = db.UpdateSliceUpload(&updateData) @@ -263,7 +251,6 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. } defer func() { - // 确保资源清理和缓存删除 session.cleanup() m.cache.Delete(session.TaskID) @@ -278,7 +265,6 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. log.Errorf("Failed to update slice upload status: %v", updateErr) } } else { - // 上传成功后从数据库中删除记录,允许重复上传 if deleteErr := db.DeleteSliceUploadByTaskID(session.TaskID); deleteErr != nil { log.Errorf("Failed to delete slice upload record: %v", deleteErr) } @@ -293,14 +279,12 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. return nil, err } - // 原生分片上传成功,直接返回,defer中会删除数据库记录 return &reqres.UploadSliceCompleteResp{ Complete: 1, TaskID: session.TaskID, }, nil default: - // 其他网盘客户端上传到本地后,上传到网盘,使用任务处理 session.mutex.Lock() tmpFile := session.tmpFile session.mutex.Unlock() @@ -330,7 +314,6 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. if session.AsTask { file.SetTmpFile(tmpFile) - // 防止defer中清理文件 session.mutex.Lock() session.tmpFile = nil session.TmpFile = "" @@ -360,15 +343,12 @@ func (m *SliceUploadManager) CompleteUpload(ctx context.Context, storage driver. 
} } -// ensureTmpFile 确保临时文件存在且正确初始化,线程安全 - 使用持久化目录 func (s *SliceUploadSession) ensureTmpFile() error { s.mutex.Lock() defer s.mutex.Unlock() if s.TmpFile == "" { - // 使用TaskID作为文件名的一部分,确保唯一性和可识别性 filename := fmt.Sprintf("slice_upload_%s_%s", s.TaskID, s.Name) - // 清理文件名中的特殊字符 filename = strings.ReplaceAll(filename, "/", "_") filename = strings.ReplaceAll(filename, "\\", "_") filename = strings.ReplaceAll(filename, ":", "_") @@ -392,7 +372,6 @@ func (s *SliceUploadSession) ensureTmpFile() error { // 更新数据库中的临时文件路径 if updateErr := db.UpdateSliceUpload(s.SliceUpload); updateErr != nil { log.Errorf("Failed to update temp file path in database: %v", updateErr) - // 不返回错误,因为文件已经创建成功,只是数据库更新失败 } log.Debugf("Created persistent temp file: %s", tmpPath) @@ -410,7 +389,6 @@ func (s *SliceUploadSession) ensureTmpFile() error { return nil } -// cleanup 清理资源,线程安全 - 保持原始实现 func (s *SliceUploadSession) cleanup() { s.mutex.Lock() defer s.mutex.Unlock() diff --git a/server/handles/fsup.go b/server/handles/fsup.go index a591fb4b1..25a59e511 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -238,7 +238,6 @@ func FsPreup(c *gin.Context) { return } - // 基本参数验证 if req.Name == "" { common.ErrorResp(c, fmt.Errorf("file name is required"), 400) return @@ -274,7 +273,6 @@ func FsUpSlice(c *gin.Context) { } _ = c.Request.Body.Close() }() - // 从HTTP头获取参数 taskID := c.GetHeader("X-Task-ID") if taskID == "" { common.ErrorResp(c, fmt.Errorf("X-Task-ID header is required"), 400) @@ -295,14 +293,12 @@ func FsUpSlice(c *gin.Context) { sliceHash := c.GetHeader("X-Slice-Hash") - // 构建请求对象 req := &reqres.UploadSliceReq{ TaskID: taskID, SliceHash: sliceHash, SliceNum: uint(sliceNum), } - // 获取请求体作为流 reader := c.Request.Body if reader == nil { common.ErrorResp(c, fmt.Errorf("request body is required"), 400) @@ -311,7 +307,6 @@ func FsUpSlice(c *gin.Context) { storage := c.Request.Context().Value(conf.StorageKey).(driver.Driver) - // 调用流式上传分片函数 err = fs.UploadSlice(c.Request.Context(), 
storage, req, reader) if err != nil { common.ErrorResp(c, fmt.Errorf("upload slice failed: %w", err), 500) From b5a05cbf6dcf4a7536c66305fc729f24c5f20448 Mon Sep 17 00:00:00 2001 From: j2rong4cn Date: Sat, 6 Sep 2025 17:41:01 +0800 Subject: [PATCH 23/26] =?UTF-8?q?fix(upload):=20=E4=BC=98=E5=8C=96?= =?UTF-8?q?=E5=85=A8=E5=B1=80=E5=88=86=E7=89=87=E4=B8=8A=E4=BC=A0=E7=AE=A1?= =?UTF-8?q?=E7=90=86=E5=99=A8=E7=9A=84=E5=88=9D=E5=A7=8B=E5=8C=96=E9=80=BB?= =?UTF-8?q?=E8=BE=91=EF=BC=8C=E7=A7=BB=E9=99=A4=E5=86=97=E4=BD=99=E5=87=BD?= =?UTF-8?q?=E6=95=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- internal/fs/fs.go | 6 +++--- internal/fs/sliceup.go | 23 ++++------------------- 2 files changed, 7 insertions(+), 22 deletions(-) diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 6079dce90..69ecd853b 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -191,13 +191,13 @@ func PutURL(ctx context.Context, path, dstName, urlStr string) error { } func Preup(c context.Context, s driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { - return getGlobalSliceManager().CreateSession(c, s, actualPath, req) + return globalSliceManager.CreateSession(c, s, actualPath, req) } func UploadSlice(ctx context.Context, storage driver.Driver, req *reqres.UploadSliceReq, reader io.Reader) error { - return getGlobalSliceManager().UploadSlice(ctx, storage, req, reader) + return globalSliceManager.UploadSlice(ctx, storage, req, reader) } func SliceUpComplete(ctx context.Context, storage driver.Driver, taskID string) (*reqres.UploadSliceCompleteResp, error) { - return getGlobalSliceManager().CompleteUpload(ctx, storage, taskID) + return globalSliceManager.CompleteUpload(ctx, storage, taskID) } diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index e5417c817..f531739fe 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -38,13 +38,6 @@ type SliceUploadSession struct { mutex sync.Mutex } 
-// NewSliceUploadManager 创建分片上传管理器 -func NewSliceUploadManager() *SliceUploadManager { - manager := &SliceUploadManager{} - go manager.cleanupIncompleteUploads() - return manager -} - // CreateSession 创建新的上传会话 func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.Driver, actualPath string, req *reqres.PreupReq) (*reqres.PreupResp, error) { srcobj, err := op.Get(ctx, storage, actualPath) @@ -190,11 +183,11 @@ func (m *SliceUploadManager) UploadSlice(ctx context.Context, storage driver.Dri switch s := storage.(type) { case driver.ISliceUpload: if err := s.SliceUpload(ctx, session.SliceUpload, req.SliceNum, reader); err != nil { - log.Errorf("Native slice upload failed - TaskID: %s, SliceNum: %d, Error: %v", + log.Errorf("Native slice upload failed - TaskID: %s, SliceNum: %d, Error: %v", req.TaskID, req.SliceNum, err) return errors.WithMessagef(err, "slice %d upload failed", req.SliceNum) } - log.Debugf("Native slice upload success - TaskID: %s, SliceNum: %d", + log.Debugf("Native slice upload success - TaskID: %s, SliceNum: %d", req.TaskID, req.SliceNum) default: @@ -410,19 +403,11 @@ func (s *SliceUploadSession) cleanup() { // 全局管理器实例使用延迟初始化 var globalSliceManager *SliceUploadManager -var globalSliceManagerOnce sync.Once func InitSliceUploadManager() { log.Info("Initializing slice upload manager...") - getGlobalSliceManager() -} - -// getGlobalSliceManager 获取全局分片上传管理器(延迟初始化) -func getGlobalSliceManager() *SliceUploadManager { - globalSliceManagerOnce.Do(func() { - globalSliceManager = NewSliceUploadManager() - }) - return globalSliceManager + globalSliceManager = &SliceUploadManager{} + go globalSliceManager.cleanupIncompleteUploads() } // sliceWriter 分片写入器 - 保持原始实现 From a438bb8b79efe029c42b1894848513dcc75dc001 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Sat, 6 Sep 2025 17:52:57 +0800 Subject: [PATCH 24/26] =?UTF-8?q?fix(upload):=20=E8=B0=83=E6=95=B4?= =?UTF-8?q?=E6=97=A5=E5=BF=97=E7=BA=A7=E5=88=AB?= MIME-Version: 1.0 Content-Type: 
text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- drivers/123_open/upload.go | 2 +- internal/db/slice_upload.go | 4 ++-- internal/fs/sliceup.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go index 6de0088ab..b3eb09de1 100644 --- a/drivers/123_open/upload.go +++ b/drivers/123_open/upload.go @@ -211,7 +211,7 @@ func (d *Open123) sliceUpComplete(uploadID string) error { log.Error("123 open uploadComplete error", err) return err } - log.Infof("upload complete,body: %s", string(b)) + log.Debugf("upload complete,body: %s", string(b)) if r.Data.Completed { return nil } diff --git a/internal/db/slice_upload.go b/internal/db/slice_upload.go index de0b5f778..aadb11f26 100644 --- a/internal/db/slice_upload.go +++ b/internal/db/slice_upload.go @@ -111,7 +111,7 @@ func CleanupOrphanedSliceUploads() error { } if cleanedCount > 0 { - log.Infof("Cleaned up %d orphaned slice upload tasks", cleanedCount) + log.Debugf("Cleaned up %d orphaned slice upload tasks", cleanedCount) } return cleanupOrphanedTempFiles() @@ -187,7 +187,7 @@ func cleanupOrphanedTempFiles() error { } if cleanedCount > 0 { - log.Infof("Cleaned up %d orphaned temp files from %s", cleanedCount, tempDir) + log.Debugf("Cleaned up %d orphaned temp files from %s", cleanedCount, tempDir) } return nil diff --git a/internal/fs/sliceup.go b/internal/fs/sliceup.go index f531739fe..bf45b3b73 100644 --- a/internal/fs/sliceup.go +++ b/internal/fs/sliceup.go @@ -63,7 +63,7 @@ func (m *SliceUploadManager) CreateSession(ctx context.Context, storage driver.D AsTask: req.AsTask, UserID: user.ID, } - log.Infof("storage mount path %s", storage.GetStorage().MountPath) + log.Debugf("storage mount path %s", storage.GetStorage().MountPath) switch st := storage.(type) { case driver.IPreup: From db9aa34de4e4c05e5917998f11d1faf900dcc196 Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Tue, 23 Sep 2025 13:07:29 +0800 Subject: [PATCH 25/26] Merge main 
branch --- .github/workflows/beta_release.yml | 2 +- .github/workflows/build.yml | 2 +- .github/workflows/release.yml | 2 +- .github/workflows/release_docker.yml | 4 +- .github/workflows/test_docker.yml | 2 +- README.md | 11 +- README_cn.md | 10 +- README_ja.md | 10 +- README_nl.md | 10 +- build.sh | 6 +- drivers/115_open/driver.go | 21 + drivers/123/types.go | 2 +- drivers/123_open/driver.go | 15 + drivers/123_open/types.go | 6 +- drivers/123_share/types.go | 2 +- drivers/189_tv/driver.go | 22 +- drivers/189_tv/meta.go | 1 - drivers/189_tv/utils.go | 66 +++- drivers/189pc/driver.go | 30 +- drivers/189pc/help.go | 14 + drivers/189pc/meta.go | 8 +- drivers/189pc/types.go | 31 +- drivers/189pc/utils.go | 277 ++++++++++++-- drivers/alias/driver.go | 71 +++- drivers/alias/meta.go | 1 + drivers/aliyundrive_open/driver.go | 15 + drivers/all.go | 1 + drivers/baidu_netdisk/driver.go | 8 + drivers/baidu_netdisk/types.go | 7 + drivers/baidu_netdisk/util.go | 12 + drivers/chunk/driver.go | 488 ++++++++++++++++++++++++ drivers/chunk/meta.go | 31 ++ drivers/chunk/obj.go | 8 + drivers/cloudreve_v4/driver.go | 15 + drivers/cloudreve_v4/types.go | 6 + drivers/crypt/driver.go | 14 + drivers/local/driver.go | 17 +- drivers/local/util_unix.go | 17 + drivers/local/util_windows.go | 73 ++-- drivers/quark_uc_tv/driver.go | 12 +- drivers/quark_uc_tv/meta.go | 2 + drivers/sftp/driver.go | 19 + drivers/smb/driver.go | 16 + drivers/template/driver.go | 5 + internal/archive/archives/archives.go | 12 +- internal/archive/archives/utils.go | 9 +- internal/archive/iso9660/iso9660.go | 10 +- internal/archive/iso9660/utils.go | 14 +- internal/archive/rardecode/rardecode.go | 6 +- internal/archive/rardecode/utils.go | 24 +- internal/archive/tool/helper.go | 39 +- internal/bootstrap/data/setting.go | 3 +- internal/conf/const.go | 7 +- internal/driver/driver.go | 5 + internal/fs/fs.go | 10 +- internal/fs/get.go | 4 +- internal/fs/list.go | 2 +- internal/model/obj.go | 16 + internal/model/object.go | 
13 + internal/model/storage.go | 37 ++ internal/net/request.go | 4 +- internal/net/serve.go | 12 +- internal/op/archive.go | 5 +- internal/op/fs.go | 13 +- internal/op/storage.go | 56 ++- internal/stream/stream.go | 60 ++- internal/stream/util.go | 2 +- pkg/buffer/bytes.go | 59 +-- pkg/buffer/bytes_test.go | 5 +- pkg/buffer/file.go | 88 +++++ pkg/utils/hash.go | 5 + pkg/utils/io.go | 41 +- server/handles/down.go | 2 +- server/handles/fsread.go | 90 +++-- server/handles/fsup.go | 19 +- server/handles/storage.go | 40 +- server/webdav/webdav.go | 15 +- 77 files changed, 1825 insertions(+), 294 deletions(-) create mode 100644 drivers/chunk/driver.go create mode 100644 drivers/chunk/meta.go create mode 100644 drivers/chunk/obj.go create mode 100644 pkg/buffer/file.go diff --git a/.github/workflows/beta_release.yml b/.github/workflows/beta_release.yml index 90148487a..268a5833f 100644 --- a/.github/workflows/beta_release.yml +++ b/.github/workflows/beta_release.yml @@ -87,7 +87,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: "1.24.5" + go-version: "1.25.0" - name: Setup web run: bash build.sh dev web diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4e975d1f4..a2393b84b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -33,7 +33,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: "1.24.5" + go-version: "1.25.0" - name: Setup web run: bash build.sh dev web diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 97280bc9d..9bbf0f1ca 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -46,7 +46,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: '1.24' + go-version: '1.25.0' - name: Checkout uses: actions/checkout@v4 diff --git a/.github/workflows/release_docker.yml b/.github/workflows/release_docker.yml index a5169013b..80c065647 100644 --- a/.github/workflows/release_docker.yml +++ 
b/.github/workflows/release_docker.yml @@ -47,7 +47,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: 'stable' + go-version: '1.25.0' - name: Cache Musl id: cache-musl @@ -87,7 +87,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: 'stable' + go-version: '1.25.0' - name: Cache Musl id: cache-musl diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml index c52fc6b77..aa6fe8966 100644 --- a/.github/workflows/test_docker.yml +++ b/.github/workflows/test_docker.yml @@ -36,7 +36,7 @@ jobs: - uses: actions/setup-go@v5 with: - go-version: 'stable' + go-version: '1.25.0' - name: Cache Musl id: cache-musl diff --git a/README.md b/README.md index bf71696fa..00f2b585d 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,6 @@ Thank you for your support and understanding of the OpenList project. - [x] [Thunder](https://pan.xunlei.com) - [x] [Lanzou](https://www.lanzou.com) - [x] [ILanzou](https://www.ilanzou.com) - - [x] [Aliyundrive share](https://www.alipan.com) - [x] [Google photo](https://photos.google.com) - [x] [Mega.nz](https://mega.nz) - [x] [Baidu photo](https://photo.baidu.com) @@ -85,6 +84,16 @@ Thank you for your support and understanding of the OpenList project. - [x] [FeijiPan](https://www.feijipan.com) - [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) + - [x] [Chaoxing](https://www.chaoxing.com) + - [x] [CNB](https://cnb.cool/) + - [x] [Degoo](https://degoo.com) + - [x] [Doubao](https://www.doubao.com) + - [x] [Febbox](https://www.febbox.com) + - [x] [GitHub](https://github.com) + - [x] [OpenList](https://github.com/OpenListTeam/OpenList) + - [x] [Teldrive](https://github.com/tgdrive/teldrive) + - [x] [Weiyun](https://www.weiyun.com) + - [x] Easy to deploy and out-of-the-box - [x] File preview (PDF, markdown, code, plain text, ...) 
- [x] Image preview in gallery mode diff --git a/README_cn.md b/README_cn.md index b03b7bf2c..fb6690507 100644 --- a/README_cn.md +++ b/README_cn.md @@ -74,7 +74,6 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3 - [x] [迅雷网盘](https://pan.xunlei.com) - [x] [蓝奏云](https://www.lanzou.com) - [x] [蓝奏云优享版](https://www.ilanzou.com) - - [x] [阿里云盘分享](https://www.alipan.com) - [x] [Google 相册](https://photos.google.com) - [x] [Mega.nz](https://mega.nz) - [x] [百度相册](https://photo.baidu.com) @@ -85,6 +84,15 @@ OpenList 是一个由 OpenList 团队独立维护的开源项目,遵循 AGPL-3 - [x] [飞机盘](https://www.feijipan.com) - [x] [多吉云](https://www.dogecloud.com/product/oss) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) + - [x] [超星](https://www.chaoxing.com) + - [x] [CNB](https://cnb.cool/) + - [x] [Degoo](https://degoo.com) + - [x] [豆包](https://www.doubao.com) + - [x] [Febbox](https://www.febbox.com) + - [x] [GitHub](https://github.com) + - [x] [OpenList](https://github.com/OpenListTeam/OpenList) + - [x] [Teldrive](https://github.com/tgdrive/teldrive) + - [x] [微云](https://www.weiyun.com) - [x] 部署方便,开箱即用 - [x] 文件预览(PDF、markdown、代码、纯文本等) - [x] 画廊模式下的图片预览 diff --git a/README_ja.md b/README_ja.md index 0a613dcab..f39034288 100644 --- a/README_ja.md +++ b/README_ja.md @@ -74,7 +74,6 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい - [x] [Thunder](https://pan.xunlei.com) - [x] [Lanzou](https://www.lanzou.com) - [x] [ILanzou](https://www.ilanzou.com) - - [x] [Aliyundrive share](https://www.alipan.com) - [x] [Google photo](https://photos.google.com) - [x] [Mega.nz](https://mega.nz) - [x] [Baidu photo](https://photo.baidu.com) @@ -85,6 +84,15 @@ OpenListプロジェクトへのご支援とご理解をありがとうござい - [x] [FeijiPan](https://www.feijipan.com) - [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) + - [x] [Chaoxing](https://www.chaoxing.com) + - [x] [CNB](https://cnb.cool/) + - [x] [Degoo](https://degoo.com) + - [x] 
[Doubao](https://www.doubao.com) + - [x] [Febbox](https://www.febbox.com) + - [x] [GitHub](https://github.com) + - [x] [OpenList](https://github.com/OpenListTeam/OpenList) + - [x] [Teldrive](https://github.com/tgdrive/teldrive) + - [x] [Weiyun](https://www.weiyun.com) - [x] 簡単にデプロイでき、すぐに使える - [x] ファイルプレビュー(PDF、markdown、コード、テキストなど) - [x] ギャラリーモードでの画像プレビュー diff --git a/README_nl.md b/README_nl.md index c9acc222b..56260243a 100644 --- a/README_nl.md +++ b/README_nl.md @@ -74,7 +74,6 @@ Dank u voor uw ondersteuning en begrip - [x] [Thunder](https://pan.xunlei.com) - [x] [Lanzou](https://www.lanzou.com) - [x] [ILanzou](https://www.ilanzou.com) - - [x] [Aliyundrive share](https://www.alipan.com) - [x] [Google photo](https://photos.google.com) - [x] [Mega.nz](https://mega.nz) - [x] [Baidu photo](https://photo.baidu.com) @@ -85,6 +84,15 @@ Dank u voor uw ondersteuning en begrip - [x] [FeijiPan](https://www.feijipan.com) - [x] [dogecloud](https://www.dogecloud.com/product/oss) - [x] [Azure Blob Storage](https://azure.microsoft.com/products/storage/blobs) + - [x] [Chaoxing](https://www.chaoxing.com) + - [x] [CNB](https://cnb.cool/) + - [x] [Degoo](https://degoo.com) + - [x] [Doubao](https://www.doubao.com) + - [x] [Febbox](https://www.febbox.com) + - [x] [GitHub](https://github.com) + - [x] [OpenList](https://github.com/OpenListTeam/OpenList) + - [x] [Teldrive](https://github.com/tgdrive/teldrive) + - [x] [Weiyun](https://www.weiyun.com) - [x] Eenvoudig te implementeren en direct te gebruiken - [x] Bestandsvoorbeeld (PDF, markdown, code, platte tekst, ...) 
- [x] Afbeeldingsvoorbeeld in galerijweergave diff --git a/build.sh b/build.sh index 8f60a21f2..26e5a301b 100644 --- a/build.sh +++ b/build.sh @@ -236,7 +236,7 @@ BuildRelease() { BuildLoongGLIBC() { local target_abi="$2" local output_file="$1" - local oldWorldGoVersion="1.24.3" + local oldWorldGoVersion="1.25.0" if [ "$target_abi" = "abi1.0" ]; then echo building for linux-loong64-abi1.0 @@ -254,13 +254,13 @@ BuildLoongGLIBC() { # Download and setup patched Go compiler for old-world if ! curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \ - "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \ + "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \ -o go-loong64-abi1.0.tar.gz; then echo "Error: Failed to download patched Go compiler for old-world ABI1.0" if [ -n "$GITHUB_TOKEN" ]; then echo "Error output from curl:" curl -fsSL --retry 3 -H "Authorization: Bearer $GITHUB_TOKEN" \ - "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250722/go${oldWorldGoVersion}.linux-amd64.tar.gz" \ + "https://github.com/loong64/loong64-abi1.0-toolchains/releases/download/20250821/go${oldWorldGoVersion}.linux-amd64.tar.gz" \ -o go-loong64-abi1.0.tar.gz || true fi return 1 diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index 1ded971ed..edab65abe 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -337,6 +337,27 @@ func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, er return resp, nil } +func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + userInfo, err := d.client.UserInfo(ctx) + if err != nil { + return nil, err + } + total, err := userInfo.RtSpaceInfo.AllTotal.Size.Int64() + if err != nil { + return nil, err + } + free, err := userInfo.RtSpaceInfo.AllRemain.Size.Int64() + if err != nil { + return nil, 
err + } + return &model.StorageDetails{ + DiskUsage: model.DiskUsage{ + TotalSpace: uint64(total), + FreeSpace: uint64(free), + }, + }, nil +} + // func (d *Open115) GetArchiveMeta(ctx context.Context, obj model.Obj, args model.ArchiveArgs) (model.ArchiveMeta, error) { // // TODO get archive file meta-info, return errs.NotImplement to use an internal archive tool, optional // return nil, errs.NotImplement diff --git a/drivers/123/types.go b/drivers/123/types.go index 15e05a152..7e5967d8b 100644 --- a/drivers/123/types.go +++ b/drivers/123/types.go @@ -28,7 +28,7 @@ func (f File) CreateTime() time.Time { } func (f File) GetHash() utils.HashInfo { - return utils.HashInfo{} + return utils.NewHashInfo(utils.MD5, f.Etag) } func (f File) GetPath() string { diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go index 260c3a776..fa279d5da 100644 --- a/drivers/123_open/driver.go +++ b/drivers/123_open/driver.go @@ -298,5 +298,20 @@ func (d *Open123) UploadSliceComplete(ctx context.Context, su *tables.SliceUploa return nil } +func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + userInfo, err := d.getUserInfo() + if err != nil { + return nil, err + } + total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp + free := total - userInfo.Data.SpaceUsed + return &model.StorageDetails{ + DiskUsage: model.DiskUsage{ + TotalSpace: total, + FreeSpace: free, + }, + }, nil +} + var _ driver.Driver = (*Open123)(nil) var _ driver.PutResult = (*Open123)(nil) diff --git a/drivers/123_open/types.go b/drivers/123_open/types.go index 34c609b96..ca095d5f2 100644 --- a/drivers/123_open/types.go +++ b/drivers/123_open/types.go @@ -134,9 +134,9 @@ type UserInfoResp struct { // HeadImage string `json:"headImage"` // Passport string `json:"passport"` // Mail string `json:"mail"` - // SpaceUsed int64 `json:"spaceUsed"` - // SpacePermanent int64 `json:"spacePermanent"` - // SpaceTemp int64 `json:"spaceTemp"` + SpaceUsed uint64 `json:"spaceUsed"` + 
SpacePermanent uint64 `json:"spacePermanent"` + SpaceTemp uint64 `json:"spaceTemp"` // SpaceTempExpr int64 `json:"spaceTempExpr"` // Vip bool `json:"vip"` // DirectTraffic int64 `json:"directTraffic"` diff --git a/drivers/123_share/types.go b/drivers/123_share/types.go index 6062e8468..3919c5fad 100644 --- a/drivers/123_share/types.go +++ b/drivers/123_share/types.go @@ -24,7 +24,7 @@ type File struct { } func (f File) GetHash() utils.HashInfo { - return utils.HashInfo{} + return utils.NewHashInfo(utils.MD5, f.Etag) } func (f File) GetPath() string { diff --git a/drivers/189_tv/driver.go b/drivers/189_tv/driver.go index cf943a42a..d24ef529f 100644 --- a/drivers/189_tv/driver.go +++ b/drivers/189_tv/driver.go @@ -1,7 +1,6 @@ package _189_tv import ( - "container/ring" "context" "net/http" "strconv" @@ -12,18 +11,20 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/pkg/cron" "github.com/go-resty/resty/v2" ) type Cloud189TV struct { model.Storage Addition - client *resty.Client - tokenInfo *AppSessionResp - uploadThread int - familyTransferFolder *ring.Ring - cleanFamilyTransferFile func() - storageConfig driver.Config + client *resty.Client + tokenInfo *AppSessionResp + uploadThread int + storageConfig driver.Config + + TempUuid string + cron *cron.Cron // 新增 cron 字段 } func (y *Cloud189TV) Config() driver.Config { @@ -79,10 +80,17 @@ func (y *Cloud189TV) Init(ctx context.Context) (err error) { } } + y.cron = cron.NewCron(time.Minute * 5) + y.cron.Do(y.keepAlive) + return } func (y *Cloud189TV) Drop(ctx context.Context) error { + if y.cron != nil { + y.cron.Stop() + y.cron = nil + } return nil } diff --git a/drivers/189_tv/meta.go b/drivers/189_tv/meta.go index efe344e33..f50fe7ead 100644 --- a/drivers/189_tv/meta.go +++ b/drivers/189_tv/meta.go @@ -8,7 +8,6 @@ import ( type Addition struct { driver.RootID 
AccessToken string `json:"access_token"` - TempUuid string OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` Type string `json:"type" type:"select" options:"personal,family" default:"personal"` diff --git a/drivers/189_tv/utils.go b/drivers/189_tv/utils.go index 4692ee6b2..395c5dcdd 100644 --- a/drivers/189_tv/utils.go +++ b/drivers/189_tv/utils.go @@ -66,6 +66,10 @@ func (y *Cloud189TV) AppKeySignatureHeader(url, method string) map[string]string } func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, isFamily ...bool) ([]byte, error) { + return y.requestWithRetry(url, method, callback, params, resp, 0, isFamily...) +} + +func (y *Cloud189TV) requestWithRetry(url, method string, callback base.ReqCallback, params map[string]string, resp interface{}, retryCount int, isFamily ...bool) ([]byte, error) { req := y.client.R().SetQueryParams(clientSuffix()) if params != nil { @@ -91,7 +95,22 @@ func (y *Cloud189TV) request(url, method string, callback base.ReqCallback, para if strings.Contains(res.String(), "userSessionBO is null") || strings.Contains(res.String(), "InvalidSessionKey") { - return nil, errors.New("session expired") + // 限制重试次数,避免无限递归 + if retryCount >= 3 { + y.Addition.AccessToken = "" + op.MustSaveDriverStorage(y) + return nil, errors.New("session expired after retry") + } + + // 尝试刷新会话 + if err := y.refreshSession(); err != nil { + // 如果刷新失败,说明AccessToken也已过期,需要重新登录 + y.Addition.AccessToken = "" + op.MustSaveDriverStorage(y) + return nil, errors.New("session expired") + } + // 如果刷新成功,则重试原始请求(增加重试计数) + return y.requestWithRetry(url, method, callback, params, resp, retryCount+1, isFamily...) 
} // 处理错误 @@ -211,7 +230,7 @@ func (y *Cloud189TV) login() (err error) { var erron RespErr var tokenInfo AppSessionResp if y.Addition.AccessToken == "" { - if y.Addition.TempUuid == "" { + if y.TempUuid == "" { // 获取登录参数 var uuidInfo UuidInfoResp req.SetResult(&uuidInfo).SetError(&erron) @@ -230,7 +249,7 @@ func (y *Cloud189TV) login() (err error) { if uuidInfo.Uuid == "" { return errors.New("uuidInfo is empty") } - y.Addition.TempUuid = uuidInfo.Uuid + y.TempUuid = uuidInfo.Uuid op.MustSaveDriverStorage(y) // 展示二维码 @@ -258,7 +277,7 @@ func (y *Cloud189TV) login() (err error) { // Signature req.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/qrcodeLoginResult.action", http.MethodGet)) - req.SetQueryParam("uuid", y.Addition.TempUuid) + req.SetQueryParam("uuid", y.TempUuid) _, err = req.Execute(http.MethodGet, ApiUrl+"/family/manage/qrcodeLoginResult.action") if err != nil { return @@ -270,7 +289,6 @@ func (y *Cloud189TV) login() (err error) { return errors.New("E189AccessToken is empty") } y.Addition.AccessToken = accessTokenResp.E189AccessToken - y.Addition.TempUuid = "" } } // 获取SessionKey 和 SessionSecret @@ -294,6 +312,44 @@ func (y *Cloud189TV) login() (err error) { return } +// refreshSession 尝试使用现有的 AccessToken 刷新会话 +func (y *Cloud189TV) refreshSession() (err error) { + var erron RespErr + var tokenInfo AppSessionResp + reqb := y.client.R().SetQueryParams(clientSuffix()) + reqb.SetResult(&tokenInfo).SetError(&erron) + // Signature + reqb.SetHeaders(y.AppKeySignatureHeader(ApiUrl+"/family/manage/loginFamilyMerge.action", + http.MethodGet)) + reqb.SetQueryParam("e189AccessToken", y.Addition.AccessToken) + _, err = reqb.Execute(http.MethodGet, ApiUrl+"/family/manage/loginFamilyMerge.action") + if err != nil { + return + } + + if erron.HasError() { + return &erron + } + + y.tokenInfo = &tokenInfo + return nil +} + +func (y *Cloud189TV) keepAlive() { + _, err := y.get(ApiUrl+"/keepUserSession.action", func(r *resty.Request) { + 
r.SetQueryParams(clientSuffix()) + }, nil) + if err != nil { + utils.Log.Warnf("189tv: Failed to keep user session alive: %v", err) + // 如果keepAlive失败,尝试刷新session + if refreshErr := y.refreshSession(); refreshErr != nil { + utils.Log.Errorf("189tv: Failed to refresh session after keepAlive error: %v", refreshErr) + } + } else { + utils.Log.Debugf("189tv: User session kept alive successfully.") + } +} + func (y *Cloud189TV) RapidUpload(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, isFamily bool, overwrite bool) (model.Obj, error) { fileMd5 := stream.GetHash().GetHash(utils.MD5) if len(fileMd5) < utils.MD5.Width { diff --git a/drivers/189pc/driver.go b/drivers/189pc/driver.go index 49e3dd732..f7ba93c9f 100644 --- a/drivers/189pc/driver.go +++ b/drivers/189pc/driver.go @@ -12,6 +12,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/pkg/cron" "github.com/OpenListTeam/OpenList/v4/pkg/utils" "github.com/go-resty/resty/v2" "github.com/google/uuid" @@ -21,12 +22,12 @@ type Cloud189PC struct { model.Storage Addition - identity string - client *resty.Client - loginParam *LoginParam - tokenInfo *AppSessionResp + loginParam *LoginParam + qrcodeParam *QRLoginParam + + tokenInfo *AppSessionResp uploadThread int @@ -35,6 +36,7 @@ type Cloud189PC struct { storageConfig driver.Config ref *Cloud189PC + cron *cron.Cron } func (y *Cloud189PC) Config() driver.Config { @@ -84,14 +86,22 @@ func (y *Cloud189PC) Init(ctx context.Context) (err error) { }) } - // 避免重复登陆 - identity := utils.GetMD5EncodeStr(y.Username + y.Password) - if !y.isLogin() || y.identity != identity { - y.identity = identity + // 先尝试用Token刷新,之后尝试登陆 + if y.Addition.RefreshToken != "" { + y.tokenInfo = &AppSessionResp{RefreshToken: y.Addition.RefreshToken} + if err = y.refreshToken(); err != nil { + return + } + } else { if err = y.login(); 
err != nil { return } } + + // 初始化并启动 cron 任务 + y.cron = cron.NewCron(time.Duration(time.Minute * 5)) + // 每5分钟执行一次 keepAlive + y.cron.Do(y.keepAlive) } // 处理家庭云ID @@ -128,6 +138,10 @@ func (d *Cloud189PC) InitReference(storage driver.Driver) error { func (y *Cloud189PC) Drop(ctx context.Context) error { y.ref = nil + if y.cron != nil { + y.cron.Stop() + y.cron = nil + } return nil } diff --git a/drivers/189pc/help.go b/drivers/189pc/help.go index 8bd90d475..6f6c59f30 100644 --- a/drivers/189pc/help.go +++ b/drivers/189pc/help.go @@ -80,6 +80,20 @@ func timestamp() int64 { return time.Now().UTC().UnixNano() / 1e6 } +// formatDate formats a time.Time object into the "YYYY-MM-DDHH:mm:ssSSS" format. +func formatDate(t time.Time) string { + // The layout string "2006-01-0215:04:05.000" corresponds to: + // 2006 -> Year (YYYY) + // 01 -> Month (MM) + // 02 -> Day (DD) + // 15 -> Hour (HH) + // 04 -> Minute (mm) + // 05 -> Second (ss) + // 000 -> Millisecond (SSS) with leading zeros + // Note the lack of a separator between the date and hour, matching the desired output. 
+ return t.Format("2006-01-0215:04:05.000") +} + func MustParseTime(str string) *time.Time { lastOpTime, _ := time.ParseInLocation("2006-01-02 15:04:05 -07", str+" +08", time.Local) return &lastOpTime diff --git a/drivers/189pc/meta.go b/drivers/189pc/meta.go index 22d396158..670b99116 100644 --- a/drivers/189pc/meta.go +++ b/drivers/189pc/meta.go @@ -6,9 +6,11 @@ import ( ) type Addition struct { - Username string `json:"username" required:"true"` - Password string `json:"password" required:"true"` - VCode string `json:"validate_code"` + LoginType string `json:"login_type" type:"select" options:"password,qrcode" default:"password" required:"true"` + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` + VCode string `json:"validate_code"` + RefreshToken string `json:"refresh_token" help:"To switch accounts, please clear this field"` driver.RootID OrderBy string `json:"order_by" type:"select" options:"filename,filesize,lastOpTime" default:"filename"` OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"asc"` diff --git a/drivers/189pc/types.go b/drivers/189pc/types.go index 6620483fc..d629a2ad9 100644 --- a/drivers/189pc/types.go +++ b/drivers/189pc/types.go @@ -68,15 +68,7 @@ func (e *RespErr) Error() string { return "" } -// 登陆需要的参数 -type LoginParam struct { - // 加密后的用户名和密码 - RsaUsername string - RsaPassword string - - // rsa密钥 - jRsaKey string - +type BaseLoginParam struct { // 请求头参数 Lt string ReqId string @@ -88,6 +80,27 @@ type LoginParam struct { CaptchaToken string } +// QRLoginParam 用于暂存二维码登录过程中的参数 +type QRLoginParam struct { + BaseLoginParam + + UUID string `json:"uuid"` + EncodeUUID string `json:"encodeuuid"` + EncryUUID string `json:"encryuuid"` +} + +// 登陆需要的参数 +type LoginParam struct { + // 加密后的用户名和密码 + RsaUsername string + RsaPassword string + + // rsa密钥 + jRsaKey string + + BaseLoginParam +} + // 登陆加密相关 type EncryptConfResp struct { Result int `json:"result"` diff 
--git a/drivers/189pc/utils.go b/drivers/189pc/utils.go index 12832ca80..39bc33222 100644 --- a/drivers/189pc/utils.go +++ b/drivers/189pc/utils.go @@ -29,6 +29,7 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/stream" "github.com/OpenListTeam/OpenList/v4/pkg/errgroup" "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/skip2/go-qrcode" "github.com/avast/retry-go" "github.com/go-resty/resty/v2" @@ -54,6 +55,9 @@ const ( MAC = "TELEMAC" CHANNEL_ID = "web_cloud.189.cn" + + // Error codes + UserInvalidOpenTokenError = "UserInvalidOpenToken" ) func (y *Cloud189PC) SignatureHeader(url, method, params string, isFamily bool) map[string]string { @@ -264,7 +268,14 @@ func (y *Cloud189PC) findFileByName(ctx context.Context, searchName string, fold } } -func (y *Cloud189PC) login() (err error) { +func (y *Cloud189PC) login() error { + if y.LoginType == "qrcode" { + return y.loginByQRCode() + } + return y.loginByPassword() +} + +func (y *Cloud189PC) loginByPassword() (err error) { // 初始化登陆所需参数 if y.loginParam == nil { if err = y.initLoginParam(); err != nil { @@ -278,10 +289,15 @@ func (y *Cloud189PC) login() (err error) { // 销毁登陆参数 y.loginParam = nil // 遇到错误,重新加载登陆参数(刷新验证码) - if err != nil && y.NoUseOcr { - if err1 := y.initLoginParam(); err1 != nil { - err = fmt.Errorf("err1: %s \nerr2: %s", err, err1) + if err != nil { + if y.NoUseOcr { + if err1 := y.initLoginParam(); err1 != nil { + err = fmt.Errorf("err1: %s \nerr2: %s", err, err1) + } } + + y.Status = err.Error() + op.MustSaveDriverStorage(y) } }() @@ -336,14 +352,105 @@ func (y *Cloud189PC) login() (err error) { err = fmt.Errorf(tokenInfo.ResMessage) return } + y.Addition.RefreshToken = tokenInfo.RefreshToken y.tokenInfo = &tokenInfo + op.MustSaveDriverStorage(y) return } -/* 初始化登陆需要的参数 -* 如果遇到验证码返回错误 - */ -func (y *Cloud189PC) initLoginParam() error { +func (y *Cloud189PC) loginByQRCode() error { + if y.qrcodeParam == nil { + if err := y.initQRCodeParam(); err != nil { + // 二维码也通过错误返回 + return err 
+ } + } + + var state struct { + Status int `json:"status"` + RedirectUrl string `json:"redirectUrl"` + Msg string `json:"msg"` + } + + now := time.Now() + _, err := y.client.R(). + SetHeaders(map[string]string{ + "Referer": AUTH_URL, + "Reqid": y.qrcodeParam.ReqId, + "lt": y.qrcodeParam.Lt, + }). + SetFormData(map[string]string{ + "appId": APP_ID, + "clientType": CLIENT_TYPE, + "returnUrl": RETURN_URL, + "paramId": y.qrcodeParam.ParamId, + "uuid": y.qrcodeParam.UUID, + "encryuuid": y.qrcodeParam.EncryUUID, + "date": formatDate(now), + "timeStamp": fmt.Sprint(now.UTC().UnixNano() / 1e6), + }). + ForceContentType("application/json;charset=UTF-8"). + SetResult(&state). + Post(AUTH_URL + "/api/logbox/oauth2/qrcodeLoginState.do") + if err != nil { + return fmt.Errorf("failed to check QR code state: %w", err) + } + + switch state.Status { + case 0: // 登录成功 + var tokenInfo AppSessionResp + _, err = y.client.R(). + SetResult(&tokenInfo). + SetQueryParams(clientSuffix()). + SetQueryParam("redirectURL", state.RedirectUrl). + Post(API_URL + "/getSessionForPC.action") + if err != nil { + return err + } + if tokenInfo.ResCode != 0 { + return fmt.Errorf(tokenInfo.ResMessage) + } + y.Addition.RefreshToken = tokenInfo.RefreshToken + y.tokenInfo = &tokenInfo + op.MustSaveDriverStorage(y) + return nil + case -11001: // 二维码过期 + y.qrcodeParam = nil + return errors.New("QR code expired, please try again") + case -106: // 等待扫描 + return y.genQRCode("QR code has not been scanned yet, please scan and save again") + case -11002: // 等待确认 + return y.genQRCode("QR code has been scanned, please confirm the login on your phone and save again") + default: // 其他错误 + y.qrcodeParam = nil + return fmt.Errorf("QR code login failed with status %d: %s", state.Status, state.Msg) + } +} + +func (y *Cloud189PC) genQRCode(text string) error { + // 展示二维码 + qrTemplate := ` + state: %s +
+
Or Click here: Login +` + + // Generate QR code + qrCode, err := qrcode.Encode(y.qrcodeParam.UUID, qrcode.Medium, 256) + if err != nil { + return fmt.Errorf("failed to generate QR code: %v", err) + } + + // Encode QR code to base64 + qrCodeBase64 := base64.StdEncoding.EncodeToString(qrCode) + + // Create the HTML page + qrPage := fmt.Sprintf(qrTemplate, text, qrCodeBase64, y.qrcodeParam.UUID) + return fmt.Errorf("need verify: \n%s", qrPage) + +} + +func (y *Cloud189PC) initBaseParams() (*BaseLoginParam, error) { // 清除cookie jar, _ := cookiejar.New(nil) y.client.SetCookieJar(jar) @@ -357,17 +464,30 @@ func (y *Cloud189PC) initLoginParam() error { }). Get(WEB_URL + "/api/portal/unifyLoginForPC.action") if err != nil { - return err + return nil, err } - param := LoginParam{ + return &BaseLoginParam{ CaptchaToken: regexp.MustCompile(`'captchaToken' value='(.+?)'`).FindStringSubmatch(res.String())[1], Lt: regexp.MustCompile(`lt = "(.+?)"`).FindStringSubmatch(res.String())[1], ParamId: regexp.MustCompile(`paramId = "(.+?)"`).FindStringSubmatch(res.String())[1], ReqId: regexp.MustCompile(`reqId = "(.+?)"`).FindStringSubmatch(res.String())[1], - // jRsaKey: regexp.MustCompile(`"j_rsaKey" value="(.+?)"`).FindStringSubmatch(res.String())[1], + }, nil +} + +/* 初始化登陆需要的参数 + * 如果遇到验证码返回错误 + */ +func (y *Cloud189PC) initLoginParam() error { + y.loginParam = nil + + baseParam, err := y.initBaseParams() + if err != nil { + return err } + y.loginParam = &LoginParam{BaseLoginParam: *baseParam} + // 获取rsa公钥 var encryptConf EncryptConfResp _, err = y.client.R(). 
@@ -378,18 +498,17 @@ func (y *Cloud189PC) initLoginParam() error { return err } - param.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey) - param.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Username) - param.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(param.jRsaKey, y.Password) - y.loginParam = ¶m + y.loginParam.jRsaKey = fmt.Sprintf("-----BEGIN PUBLIC KEY-----\n%s\n-----END PUBLIC KEY-----", encryptConf.Data.PubKey) + y.loginParam.RsaUsername = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Username) + y.loginParam.RsaPassword = encryptConf.Data.Pre + RsaEncrypt(y.loginParam.jRsaKey, y.Password) // 判断是否需要验证码 resp, err := y.client.R(). - SetHeader("REQID", param.ReqId). + SetHeader("REQID", y.loginParam.ReqId). SetFormData(map[string]string{ "appKey": APP_ID, "accountType": ACCOUNT_TYPE, - "userName": param.RsaUsername, + "userName": y.loginParam.RsaUsername, }).Post(AUTH_URL + "/api/logbox/oauth2/needcaptcha.do") if err != nil { return err @@ -401,8 +520,8 @@ func (y *Cloud189PC) initLoginParam() error { // 拉取验证码 imgRes, err := y.client.R(). SetQueryParams(map[string]string{ - "token": param.CaptchaToken, - "REQID": param.ReqId, + "token": y.loginParam.CaptchaToken, + "REQID": y.loginParam.ReqId, "rnd": fmt.Sprint(timestamp()), }). Get(AUTH_URL + "/api/logbox/oauth2/picCaptcha.do") @@ -429,10 +548,38 @@ func (y *Cloud189PC) initLoginParam() error { return nil } +// getQRCode 获取并返回二维码 +func (y *Cloud189PC) initQRCodeParam() (err error) { + y.qrcodeParam = nil + + baseParam, err := y.initBaseParams() + if err != nil { + return err + } + + var qrcodeParam QRLoginParam + _, err = y.client.R(). + SetFormData(map[string]string{"appId": APP_ID}). + ForceContentType("application/json;charset=UTF-8"). + SetResult(&qrcodeParam). 
+ Post(AUTH_URL + "/api/logbox/oauth2/getUUID.do") + if err != nil { + return err + } + qrcodeParam.BaseLoginParam = *baseParam + y.qrcodeParam = &qrcodeParam + + return y.genQRCode("please scan the QR code with the 189 Cloud app, then save the settings again.") +} + // 刷新会话 func (y *Cloud189PC) refreshSession() (err error) { + return y.refreshSessionWithRetry(0) +} + +func (y *Cloud189PC) refreshSessionWithRetry(retryCount int) (err error) { if y.ref != nil { - return y.ref.refreshSession() + return y.ref.refreshSessionWithRetry(retryCount) } var erron RespErr var userSessionResp UserSessionResp @@ -449,24 +596,87 @@ func (y *Cloud189PC) refreshSession() (err error) { return err } - // 错误影响正常访问,下线该储存 - defer func() { - if err != nil { - y.GetStorage().SetStatus(fmt.Sprintf("%+v", err.Error())) + // token生效刷新token + if erron.HasError() { + if erron.ResCode == UserInvalidOpenTokenError { + return y.refreshTokenWithRetry(retryCount) + } + return &erron + } + y.tokenInfo.UserSessionResp = userSessionResp + return nil +} + +// refreshToken 刷新token,失败时返回错误,不再直接调用login +func (y *Cloud189PC) refreshToken() (err error) { + return y.refreshTokenWithRetry(0) +} + +func (y *Cloud189PC) refreshTokenWithRetry(retryCount int) (err error) { + if y.ref != nil { + return y.ref.refreshTokenWithRetry(retryCount) + } + + // 限制重试次数,避免无限递归 + if retryCount >= 3 { + if y.Addition.RefreshToken != "" { + y.Addition.RefreshToken = "" op.MustSaveDriverStorage(y) } - }() + return errors.New("refresh token failed after maximum retries") + } + + var erron RespErr + var tokenInfo AppSessionResp + _, err = y.client.R(). + SetResult(&tokenInfo). + ForceContentType("application/json;charset=UTF-8"). + SetError(&erron). + SetFormData(map[string]string{ + "clientId": APP_ID, + "refreshToken": y.tokenInfo.RefreshToken, + "grantType": "refresh_token", + "format": "json", + }). 
+ Post(AUTH_URL + "/api/oauth2/refreshToken.do") + if err != nil { + return err + } + // 如果刷新失败,返回错误给上层处理 if erron.HasError() { - if erron.ResCode == "UserInvalidOpenToken" { - if err = y.login(); err != nil { - return err - } + if y.Addition.RefreshToken != "" { + y.Addition.RefreshToken = "" + op.MustSaveDriverStorage(y) } - return &erron + + // 根据登录类型决定下一步行为 + if y.LoginType == "qrcode" { + return errors.New("QR code session has expired, please re-scan the code to log in") + } + // 密码登录模式下,尝试回退到完整登录 + return y.login() + } + + y.Addition.RefreshToken = tokenInfo.RefreshToken + y.tokenInfo = &tokenInfo + op.MustSaveDriverStorage(y) + return y.refreshSessionWithRetry(retryCount + 1) +} + +func (y *Cloud189PC) keepAlive() { + _, err := y.get(API_URL+"/keepUserSession.action", func(r *resty.Request) { + r.SetQueryParams(clientSuffix()) + }, nil) + if err != nil { + utils.Log.Warnf("189pc: Failed to keep user session alive: %v", err) + // 如果keepAlive失败,尝试刷新session + if refreshErr := y.refreshSession(); refreshErr != nil { + utils.Log.Errorf("189pc: Failed to refresh session after keepAlive error: %v", refreshErr) + } + } else { + utils.Log.Debugf("189pc: User session kept alive successfully.") } - y.tokenInfo.UserSessionResp = userSessionResp - return } // 普通上传 @@ -575,8 +785,7 @@ func (y *Cloud189PC) StreamUpload(ctx context.Context, dstDir model.Obj, file mo // step.4 上传切片 uploadUrl := uploadUrls[0] - _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, - driver.NewLimitedUploadStream(ctx, rateLimitedRd), isFamily) + _, err = y.put(ctx, uploadUrl.RequestURL, uploadUrl.Headers, false, rateLimitedRd, isFamily) if err != nil { return err } diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go index 6954f2b56..ea3c25394 100644 --- a/drivers/alias/driver.go +++ b/drivers/alias/driver.go @@ -79,21 +79,45 @@ func (d *Alias) Get(ctx context.Context, path string) (model.Obj, error) { if !ok { return nil, errs.ObjectNotFound } + var ret *model.Object + 
provider := "" for _, dst := range dsts { - obj, err := fs.Get(ctx, stdpath.Join(dst, sub), &fs.GetArgs{NoLog: true}) + rawPath := stdpath.Join(dst, sub) + obj, err := fs.Get(ctx, rawPath, &fs.GetArgs{NoLog: true}) if err != nil { continue } - return &model.Object{ - Path: path, - Name: obj.GetName(), - Size: obj.GetSize(), - Modified: obj.ModTime(), - IsFolder: obj.IsDir(), - HashInfo: obj.GetHash(), + storage, err := fs.GetStorage(rawPath, &fs.GetStoragesArgs{}) + if ret == nil { + ret = &model.Object{ + Path: path, + Name: obj.GetName(), + Size: obj.GetSize(), + Modified: obj.ModTime(), + IsFolder: obj.IsDir(), + HashInfo: obj.GetHash(), + } + if !d.ProviderPassThrough || err != nil { + break + } + provider = storage.Config().Name + } else if err != nil || provider != storage.GetStorage().Driver { + provider = "" + break + } + } + if ret == nil { + return nil, errs.ObjectNotFound + } + if provider != "" { + return &model.ObjectProvider{ + Object: *ret, + Provider: model.Provider{ + Provider: provider, + }, }, nil } - return nil, errs.ObjectNotFound + return ret, nil } func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { @@ -186,6 +210,35 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) ( return nil, errs.ObjectNotFound } +func (d *Alias) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { + root, sub := d.getRootAndPath(args.Obj.GetPath()) + dsts, ok := d.pathMap[root] + if !ok { + return nil, errs.ObjectNotFound + } + for _, dst := range dsts { + rawPath := stdpath.Join(dst, sub) + storage, actualPath, err := op.GetStorageAndActualPath(rawPath) + if err != nil { + continue + } + other, ok := storage.(driver.Other) + if !ok { + continue + } + obj, err := op.GetUnwrap(ctx, storage, actualPath) + if err != nil { + continue + } + return other.Other(ctx, model.OtherArgs{ + Obj: obj, + Method: args.Method, + Data: args.Data, + }) + } + return nil, errs.NotImplement +} 
+ func (d *Alias) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { if !d.Writable { return errs.PermissionDenied diff --git a/drivers/alias/meta.go b/drivers/alias/meta.go index 83e8bba0a..27c2f8f22 100644 --- a/drivers/alias/meta.go +++ b/drivers/alias/meta.go @@ -15,6 +15,7 @@ type Addition struct { DownloadConcurrency int `json:"download_concurrency" default:"0" required:"false" type:"number" help:"Need to enable proxy"` DownloadPartSize int `json:"download_part_size" default:"0" type:"number" required:"false" help:"Need to enable proxy. Unit: KB"` Writable bool `json:"writable" type:"bool" default:"false"` + ProviderPassThrough bool `json:"provider_pass_through" type:"bool" default:"false"` } var config = driver.Config{ diff --git a/drivers/aliyundrive_open/driver.go b/drivers/aliyundrive_open/driver.go index 4695f4116..20dd92dca 100644 --- a/drivers/aliyundrive_open/driver.go +++ b/drivers/aliyundrive_open/driver.go @@ -291,6 +291,21 @@ func (d *AliyundriveOpen) Other(ctx context.Context, args model.OtherArgs) (inte return resp, nil } +func (d *AliyundriveOpen) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + res, err := d.request(ctx, limiterOther, "/adrive/v1.0/user/getSpaceInfo", http.MethodPost, nil) + if err != nil { + return nil, err + } + total := utils.Json.Get(res, "personal_space_info", "total_size").ToUint64() + used := utils.Json.Get(res, "personal_space_info", "used_size").ToUint64() + return &model.StorageDetails{ + DiskUsage: model.DiskUsage{ + TotalSpace: total, + FreeSpace: total - used, + }, + }, nil +} + var _ driver.Driver = (*AliyundriveOpen)(nil) var _ driver.MkdirResult = (*AliyundriveOpen)(nil) var _ driver.MoveResult = (*AliyundriveOpen)(nil) diff --git a/drivers/all.go b/drivers/all.go index ce614735f..197a936d0 100644 --- a/drivers/all.go +++ b/drivers/all.go @@ -20,6 +20,7 @@ import ( _ "github.com/OpenListTeam/OpenList/v4/drivers/baidu_netdisk" _ 
"github.com/OpenListTeam/OpenList/v4/drivers/baidu_photo" _ "github.com/OpenListTeam/OpenList/v4/drivers/chaoxing" + _ "github.com/OpenListTeam/OpenList/v4/drivers/chunk" _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve" _ "github.com/OpenListTeam/OpenList/v4/drivers/cloudreve_v4" _ "github.com/OpenListTeam/OpenList/v4/drivers/cnb_releases" diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 12d581d02..9c39fdcd1 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -460,4 +460,12 @@ func (d *BaiduNetdisk) UploadSliceComplete(ctx context.Context, su *tables.Slice return nil } +func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + du, err := d.quota() + if err != nil { + return nil, err + } + return &model.StorageDetails{DiskUsage: *du}, nil +} + var _ driver.Driver = (*BaiduNetdisk)(nil) diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index 8de3091d1..594de4863 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -234,4 +234,11 @@ type SliceUpCompleteResp struct { Ctime uint64 `json:"ctime"` // 文件创建时间 Mtime uint64 `json:"mtime"` // 文件修改时间 Isdir int `json:"isdir"` // 是否目录,0 文件、1 目录 +type QuotaResp struct { + Errno int `json:"errno"` + RequestId int64 `json:"request_id"` + Total uint64 `json:"total"` + Used uint64 `json:"used"` + //Free uint64 `json:"free"` + //Expire bool `json:"expire"` } diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go index 39b980259..76f26fa20 100644 --- a/drivers/baidu_netdisk/util.go +++ b/drivers/baidu_netdisk/util.go @@ -413,6 +413,18 @@ func (d *BaiduNetdisk) getSliceSize(filesize int64) int64 { return maxSliceSize } +func (d *BaiduNetdisk) quota() (*model.DiskUsage, error) { + var resp QuotaResp + _, err := d.request("https://pan.baidu.com/api/quota", http.MethodGet, nil, &resp) + if err != nil { + return nil, err + } + return &model.DiskUsage{ + 
TotalSpace: resp.Total, + FreeSpace: resp.Total - resp.Used, + }, nil +} + // func encodeURIComponent(str string) string { // r := url.QueryEscape(str) // r = strings.ReplaceAll(r, "+", "%20") diff --git a/drivers/chunk/driver.go b/drivers/chunk/driver.go new file mode 100644 index 000000000..763469740 --- /dev/null +++ b/drivers/chunk/driver.go @@ -0,0 +1,488 @@ +package chunk + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + stdpath "path" + "strconv" + "strings" + + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/fs" + "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/internal/op" + "github.com/OpenListTeam/OpenList/v4/internal/sign" + "github.com/OpenListTeam/OpenList/v4/internal/stream" + "github.com/OpenListTeam/OpenList/v4/pkg/http_range" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/OpenListTeam/OpenList/v4/server/common" +) + +type Chunk struct { + model.Storage + Addition +} + +func (d *Chunk) Config() driver.Config { + return config +} + +func (d *Chunk) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *Chunk) Init(ctx context.Context) error { + if d.PartSize <= 0 { + return errors.New("part size must be positive") + } + d.RemotePath = utils.FixAndCleanPath(d.RemotePath) + return nil +} + +func (d *Chunk) Drop(ctx context.Context) error { + return nil +} + +func (d *Chunk) Get(ctx context.Context, path string) (model.Obj, error) { + if utils.PathEqual(path, "/") { + return &model.Object{ + Name: "Root", + IsFolder: true, + Path: "/", + }, nil + } + remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath) + if err != nil { + return nil, err + } + remoteActualPath = stdpath.Join(remoteActualPath, path) + if remoteObj, err := op.Get(ctx, remoteStorage, remoteActualPath); err == nil { + return &model.Object{ + Path: path, + Name: 
remoteObj.GetName(), + Size: remoteObj.GetSize(), + Modified: remoteObj.ModTime(), + IsFolder: remoteObj.IsDir(), + HashInfo: remoteObj.GetHash(), + }, nil + } + + remoteActualDir, name := stdpath.Split(remoteActualPath) + chunkName := "[openlist_chunk]" + name + chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, chunkName), model.ListArgs{}) + if err != nil { + return nil, err + } + var totalSize int64 = 0 + // 0号块必须存在 + chunkSizes := []int64{-1} + h := make(map[*utils.HashType]string) + var first model.Obj + for _, o := range chunkObjs { + if o.IsDir() { + continue + } + if after, ok := strings.CutPrefix(o.GetName(), "hash_"); ok { + hn, value, ok := strings.Cut(strings.TrimSuffix(after, d.CustomExt), "_") + if ok { + ht, ok := utils.GetHashByName(hn) + if ok { + h[ht] = value + } + } + continue + } + idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt)) + if err != nil { + continue + } + totalSize += o.GetSize() + if len(chunkSizes) > idx { + if idx == 0 { + first = o + } + chunkSizes[idx] = o.GetSize() + } else if len(chunkSizes) == idx { + chunkSizes = append(chunkSizes, o.GetSize()) + } else { + newChunkSizes := make([]int64, idx+1) + copy(newChunkSizes, chunkSizes) + chunkSizes = newChunkSizes + chunkSizes[idx] = o.GetSize() + } + } + // 检查0号块不等于-1 以支持空文件 + // 如果块数量大于1 最后一块不可能为0 + // 只检查中间块是否有0 + for i, l := 0, len(chunkSizes)-2; ; i++ { + if i == 0 { + if chunkSizes[i] == -1 { + return nil, fmt.Errorf("chunk part[%d] are missing", i) + } + } else if chunkSizes[i] == 0 { + return nil, fmt.Errorf("chunk part[%d] are missing", i) + } + if i >= l { + break + } + } + reqDir, _ := stdpath.Split(path) + objRes := chunkObject{ + Object: model.Object{ + Path: stdpath.Join(reqDir, chunkName), + Name: name, + Size: totalSize, + Modified: first.ModTime(), + Ctime: first.CreateTime(), + }, + chunkSizes: chunkSizes, + } + if len(h) > 0 { + objRes.HashInfo = utils.NewHashInfoByMap(h) + } + return &objRes, nil +} + +func (d *Chunk) 
List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath) + if err != nil { + return nil, err + } + remoteActualDir := stdpath.Join(remoteActualPath, dir.GetPath()) + remoteObjs, err := op.List(ctx, remoteStorage, remoteActualDir, model.ListArgs{ + ReqPath: args.ReqPath, + Refresh: args.Refresh, + }) + if err != nil { + return nil, err + } + result := make([]model.Obj, 0, len(remoteObjs)) + for _, obj := range remoteObjs { + rawName := obj.GetName() + if obj.IsDir() { + if name, ok := strings.CutPrefix(rawName, "[openlist_chunk]"); ok { + chunkObjs, err := op.List(ctx, remoteStorage, stdpath.Join(remoteActualDir, rawName), model.ListArgs{ + ReqPath: stdpath.Join(args.ReqPath, rawName), + Refresh: args.Refresh, + }) + if err != nil { + return nil, err + } + totalSize := int64(0) + h := make(map[*utils.HashType]string) + first := obj + for _, o := range chunkObjs { + if o.IsDir() { + continue + } + if after, ok := strings.CutPrefix(strings.TrimSuffix(o.GetName(), d.CustomExt), "hash_"); ok { + hn, value, ok := strings.Cut(after, "_") + if ok { + ht, ok := utils.GetHashByName(hn) + if ok { + h[ht] = value + } + continue + } + } + idx, err := strconv.Atoi(strings.TrimSuffix(o.GetName(), d.CustomExt)) + if err != nil { + continue + } + if idx == 0 { + first = o + } + totalSize += o.GetSize() + } + objRes := model.Object{ + Name: name, + Size: totalSize, + Modified: first.ModTime(), + Ctime: first.CreateTime(), + } + if len(h) > 0 { + objRes.HashInfo = utils.NewHashInfoByMap(h) + } + if !d.Thumbnail { + result = append(result, &objRes) + } else { + thumbPath := stdpath.Join(args.ReqPath, ".thumbnails", name+".webp") + thumb := fmt.Sprintf("%s/d%s?sign=%s", + common.GetApiUrl(ctx), + utils.EncodePath(thumbPath, true), + sign.Sign(thumbPath)) + result = append(result, &model.ObjThumb{ + Object: objRes, + Thumbnail: model.Thumbnail{ + Thumbnail: thumb, + }, + }) + } 
+ continue + } + } + + if !d.ShowHidden && strings.HasPrefix(rawName, ".") { + continue + } + thumb, ok := model.GetThumb(obj) + objRes := model.Object{ + Name: rawName, + Size: obj.GetSize(), + Modified: obj.ModTime(), + IsFolder: obj.IsDir(), + HashInfo: obj.GetHash(), + } + if !ok { + result = append(result, &objRes) + } else { + result = append(result, &model.ObjThumb{ + Object: objRes, + Thumbnail: model.Thumbnail{ + Thumbnail: thumb, + }, + }) + } + } + return result, nil +} + +func (d *Chunk) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath) + if err != nil { + return nil, err + } + chunkFile, ok := file.(*chunkObject) + remoteActualPath = stdpath.Join(remoteActualPath, file.GetPath()) + if !ok { + l, _, err := op.Link(ctx, remoteStorage, remoteActualPath, args) + if err != nil { + return nil, err + } + resultLink := *l + resultLink.SyncClosers = utils.NewSyncClosers(l) + return &resultLink, nil + } + fileSize := chunkFile.GetSize() + mergedRrf := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) { + start := httpRange.Start + length := httpRange.Length + if length < 0 || start+length > fileSize { + length = fileSize - start + } + if length == 0 { + return io.NopCloser(strings.NewReader("")), nil + } + rs := make([]io.Reader, 0) + cs := make(utils.Closers, 0) + var ( + rc io.ReadCloser + readFrom bool + ) + for idx, chunkSize := range chunkFile.chunkSizes { + if readFrom { + l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args) + if err != nil { + _ = cs.Close() + return nil, err + } + cs = append(cs, l) + chunkSize2 := l.ContentLength + if chunkSize2 <= 0 { + chunkSize2 = o.GetSize() + } + if chunkSize2 != chunkSize { + _ = cs.Close() + return nil, fmt.Errorf("chunk part[%d] size not match", idx) + } + rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l) + if err != 
nil { + _ = cs.Close() + return nil, err + } + newLength := length - chunkSize2 + if newLength >= 0 { + length = newLength + rc, err = rrf.RangeRead(ctx, http_range.Range{Length: -1}) + } else { + rc, err = rrf.RangeRead(ctx, http_range.Range{Length: length}) + } + if err != nil { + _ = cs.Close() + return nil, err + } + rs = append(rs, rc) + cs = append(cs, rc) + if newLength <= 0 { + return utils.ReadCloser{ + Reader: io.MultiReader(rs...), + Closer: &cs, + }, nil + } + } else if newStart := start - chunkSize; newStart >= 0 { + start = newStart + } else { + l, o, err := op.Link(ctx, remoteStorage, stdpath.Join(remoteActualPath, d.getPartName(idx)), args) + if err != nil { + _ = cs.Close() + return nil, err + } + cs = append(cs, l) + chunkSize2 := l.ContentLength + if chunkSize2 <= 0 { + chunkSize2 = o.GetSize() + } + if chunkSize2 != chunkSize { + _ = cs.Close() + return nil, fmt.Errorf("chunk part[%d] size not match", idx) + } + rrf, err := stream.GetRangeReaderFromLink(chunkSize2, l) + if err != nil { + _ = cs.Close() + return nil, err + } + rc, err = rrf.RangeRead(ctx, http_range.Range{Start: start, Length: -1}) + if err != nil { + _ = cs.Close() + return nil, err + } + length -= chunkSize2 - start + cs = append(cs, rc) + if length <= 0 { + return utils.ReadCloser{ + Reader: rc, + Closer: &cs, + }, nil + } + rs = append(rs, rc) + readFrom = true + } + } + return nil, fmt.Errorf("invalid range: start=%d,length=%d,fileSize=%d", httpRange.Start, httpRange.Length, fileSize) + } + return &model.Link{ + RangeReader: stream.RangeReaderFunc(mergedRrf), + }, nil +} + +func (d *Chunk) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + path := stdpath.Join(d.RemotePath, parentDir.GetPath(), dirName) + return fs.MakeDir(ctx, path) +} + +func (d *Chunk) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + src := stdpath.Join(d.RemotePath, srcObj.GetPath()) + dst := stdpath.Join(d.RemotePath, dstDir.GetPath()) + _, err := fs.Move(ctx, src, 
dst) + return err +} + +func (d *Chunk) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + if _, ok := srcObj.(*chunkObject); ok { + newName = "[openlist_chunk]" + newName + } + return fs.Rename(ctx, stdpath.Join(d.RemotePath, srcObj.GetPath()), newName) +} + +func (d *Chunk) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + dst := stdpath.Join(d.RemotePath, dstDir.GetPath()) + src := stdpath.Join(d.RemotePath, srcObj.GetPath()) + _, err := fs.Copy(ctx, src, dst) + return err +} + +func (d *Chunk) Remove(ctx context.Context, obj model.Obj) error { + return fs.Remove(ctx, stdpath.Join(d.RemotePath, obj.GetPath())) +} + +func (d *Chunk) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error { + remoteStorage, remoteActualPath, err := op.GetStorageAndActualPath(d.RemotePath) + if err != nil { + return err + } + if d.Thumbnail && dstDir.GetName() == ".thumbnails" { + return op.Put(ctx, remoteStorage, stdpath.Join(remoteActualPath, dstDir.GetPath()), file, up) + } + upReader := &driver.ReaderUpdatingProgress{ + Reader: file, + UpdateProgress: up, + } + dst := stdpath.Join(remoteActualPath, dstDir.GetPath(), "[openlist_chunk]"+file.GetName()) + if d.StoreHash { + for ht, value := range file.GetHash().All() { + _ = op.Put(ctx, remoteStorage, dst, &stream.FileStream{ + Obj: &model.Object{ + Name: fmt.Sprintf("hash_%s_%s%s", ht.Name, value, d.CustomExt), + Size: 1, + Modified: file.ModTime(), + }, + Mimetype: "application/octet-stream", + Reader: bytes.NewReader([]byte{0}), // 兼容不支持空文件的驱动 + }, nil, true) + } + } + fullPartCount := int(file.GetSize() / d.PartSize) + tailSize := file.GetSize() % d.PartSize + if tailSize == 0 && fullPartCount > 0 { + fullPartCount-- + tailSize = d.PartSize + } + partIndex := 0 + for partIndex < fullPartCount { + err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{ + Obj: &model.Object{ + Name: d.getPartName(partIndex), + Size: d.PartSize, + Modified: 
file.ModTime(), + }, + Mimetype: file.GetMimetype(), + Reader: io.LimitReader(upReader, d.PartSize), + }, nil, true) + if err != nil { + _ = op.Remove(ctx, remoteStorage, dst) + return err + } + partIndex++ + } + err = op.Put(ctx, remoteStorage, dst, &stream.FileStream{ + Obj: &model.Object{ + Name: d.getPartName(fullPartCount), + Size: tailSize, + Modified: file.ModTime(), + }, + Mimetype: file.GetMimetype(), + Reader: upReader, + }, nil) + if err != nil { + _ = op.Remove(ctx, remoteStorage, dst) + } + return err +} + +func (d *Chunk) getPartName(part int) string { + return fmt.Sprintf("%d%s", part, d.CustomExt) +} + +func (d *Chunk) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + remoteStorage, err := fs.GetStorage(d.RemotePath, &fs.GetStoragesArgs{}) + if err != nil { + return nil, errs.NotImplement + } + wd, ok := remoteStorage.(driver.WithDetails) + if !ok { + return nil, errs.NotImplement + } + remoteDetails, err := wd.GetDetails(ctx) + if err != nil { + return nil, err + } + return &model.StorageDetails{ + DiskUsage: remoteDetails.DiskUsage, + }, nil +} + +var _ driver.Driver = (*Chunk)(nil) diff --git a/drivers/chunk/meta.go b/drivers/chunk/meta.go new file mode 100644 index 000000000..45429231f --- /dev/null +++ b/drivers/chunk/meta.go @@ -0,0 +1,31 @@ +package chunk + +import ( + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/op" +) + +type Addition struct { + RemotePath string `json:"remote_path" required:"true"` + PartSize int64 `json:"part_size" required:"true" type:"number" help:"bytes"` + CustomExt string `json:"custom_ext" type:"string"` + StoreHash bool `json:"store_hash" type:"bool" default:"true"` + + Thumbnail bool `json:"thumbnail" required:"true" default:"false" help:"enable thumbnail which pre-generated under .thumbnails folder"` + ShowHidden bool `json:"show_hidden" default:"true" required:"false" help:"show hidden directories and files"` +} + +var config = 
driver.Config{ + Name: "Chunk", + LocalSort: true, + OnlyProxy: true, + NoCache: true, + DefaultRoot: "/", + NoLinkURL: true, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Chunk{} + }) +} diff --git a/drivers/chunk/obj.go b/drivers/chunk/obj.go new file mode 100644 index 000000000..1885a9257 --- /dev/null +++ b/drivers/chunk/obj.go @@ -0,0 +1,8 @@ +package chunk + +import "github.com/OpenListTeam/OpenList/v4/internal/model" + +type chunkObject struct { + model.Object + chunkSizes []int64 +} diff --git a/drivers/cloudreve_v4/driver.go b/drivers/cloudreve_v4/driver.go index 45549cbcb..e9ce639e5 100644 --- a/drivers/cloudreve_v4/driver.go +++ b/drivers/cloudreve_v4/driver.go @@ -339,6 +339,21 @@ func (d *CloudreveV4) ArchiveDecompress(ctx context.Context, srcObj, dstDir mode return nil, errs.NotImplement } +func (d *CloudreveV4) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + // TODO return storage details (total space, free space, etc.) + var r CapacityResp + err := d.request(http.MethodGet, "/user/capacity", nil, &r) + if err != nil { + return nil, err + } + return &model.StorageDetails{ + DiskUsage: model.DiskUsage{ + TotalSpace: r.Total, + FreeSpace: r.Total - r.Used, + }, + }, nil +} + //func (d *CloudreveV4) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/cloudreve_v4/types.go b/drivers/cloudreve_v4/types.go index 7c178211c..a10f9fe1c 100644 --- a/drivers/cloudreve_v4/types.go +++ b/drivers/cloudreve_v4/types.go @@ -204,3 +204,9 @@ type FolderSummaryResp struct { CalculatedAt time.Time `json:"calculated_at"` } `json:"folder_summary"` } + +type CapacityResp struct { + Total uint64 `json:"total"` + Used uint64 `json:"used"` + // StoragePackTotal uint64 `json:"storage_pack_total"` +} diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index 704c70cb7..ac5a77976 100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ 
-411,6 +411,20 @@ func (d *Crypt) Put(ctx context.Context, dstDir model.Obj, streamer model.FileSt return nil } +func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + wd, ok := d.remoteStorage.(driver.WithDetails) + if !ok { + return nil, errs.NotImplement + } + remoteDetails, err := wd.GetDetails(ctx) + if err != nil { + return nil, err + } + return &model.StorageDetails{ + DiskUsage: remoteDetails.DiskUsage, + }, nil +} + //func (d *Safe) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/local/driver.go b/drivers/local/driver.go index 0a78e12db..45a9a104a 100644 --- a/drivers/local/driver.go +++ b/drivers/local/driver.go @@ -51,7 +51,7 @@ func (d *Local) Config() driver.Config { func (d *Local) Init(ctx context.Context) error { if d.MkdirPerm == "" { - d.mkdirPerm = 0777 + d.mkdirPerm = 0o777 } else { v, err := strconv.ParseUint(d.MkdirPerm, 8, 32) if err != nil { @@ -150,6 +150,7 @@ func (d *Local) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([ } return files, nil } + func (d *Local) FileInfoToObj(ctx context.Context, f fs.FileInfo, reqPath string, fullPath string) model.Obj { thumb := "" if d.Thumbnail { @@ -198,7 +199,7 @@ func (d *Local) Get(ctx context.Context, path string) (model.Obj, error) { path = filepath.Join(d.GetRootPath(), path) f, err := os.Stat(path) if err != nil { - if strings.Contains(err.Error(), "cannot find the file") { + if os.IsNotExist(err) { return nil, errs.ObjectNotFound } return nil, err @@ -375,7 +376,7 @@ func (d *Local) Remove(ctx context.Context, obj model.Obj) error { } } else { if !utils.Exists(d.RecycleBinPath) { - err = os.MkdirAll(d.RecycleBinPath, 0755) + err = os.MkdirAll(d.RecycleBinPath, 0o755) if err != nil { return err } @@ -434,4 +435,14 @@ func (d *Local) Put(ctx context.Context, dstDir model.Obj, stream model.FileStre return nil } +func (d *Local) GetDetails(ctx context.Context) 
(*model.StorageDetails, error) { + du, err := getDiskUsage(d.RootFolderPath) + if err != nil { + return nil, err + } + return &model.StorageDetails{ + DiskUsage: du, + }, nil +} + var _ driver.Driver = (*Local)(nil) diff --git a/drivers/local/util_unix.go b/drivers/local/util_unix.go index ddb4879f0..3362df341 100644 --- a/drivers/local/util_unix.go +++ b/drivers/local/util_unix.go @@ -5,8 +5,25 @@ package local import ( "io/fs" "strings" + "syscall" + + "github.com/OpenListTeam/OpenList/v4/internal/model" ) func isHidden(f fs.FileInfo, _ string) bool { return strings.HasPrefix(f.Name(), ".") } + +func getDiskUsage(path string) (model.DiskUsage, error) { + var stat syscall.Statfs_t + err := syscall.Statfs(path, &stat) + if err != nil { + return model.DiskUsage{}, err + } + total := stat.Blocks * uint64(stat.Bsize) + free := stat.Bfree * uint64(stat.Bsize) + return model.DiskUsage{ + TotalSpace: total, + FreeSpace: free, + }, nil +} diff --git a/drivers/local/util_windows.go b/drivers/local/util_windows.go index 8df191cb2..370640098 100644 --- a/drivers/local/util_windows.go +++ b/drivers/local/util_windows.go @@ -1,22 +1,51 @@ -//go:build windows - -package local - -import ( - "io/fs" - "path/filepath" - "syscall" -) - -func isHidden(f fs.FileInfo, fullPath string) bool { - filePath := filepath.Join(fullPath, f.Name()) - namePtr, err := syscall.UTF16PtrFromString(filePath) - if err != nil { - return false - } - attrs, err := syscall.GetFileAttributes(namePtr) - if err != nil { - return false - } - return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0 -} +//go:build windows + +package local + +import ( + "errors" + "io/fs" + "path/filepath" + "syscall" + + "github.com/OpenListTeam/OpenList/v4/internal/model" + "golang.org/x/sys/windows" +) + +func isHidden(f fs.FileInfo, fullPath string) bool { + filePath := filepath.Join(fullPath, f.Name()) + namePtr, err := syscall.UTF16PtrFromString(filePath) + if err != nil { + return false + } + attrs, err := 
syscall.GetFileAttributes(namePtr) + if err != nil { + return false + } + return attrs&syscall.FILE_ATTRIBUTE_HIDDEN != 0 +} + +func getDiskUsage(path string) (model.DiskUsage, error) { + abs, err := filepath.Abs(path) + if err != nil { + return model.DiskUsage{}, err + } + root := filepath.VolumeName(abs) + if len(root) != 2 || root[1] != ':' { + return model.DiskUsage{}, errors.New("cannot get disk label") + } + var freeBytes, totalBytes, totalFreeBytes uint64 + err = windows.GetDiskFreeSpaceEx( + windows.StringToUTF16Ptr(root), + &freeBytes, + &totalBytes, + &totalFreeBytes, + ) + if err != nil { + return model.DiskUsage{}, err + } + return model.DiskUsage{ + TotalSpace: totalBytes, + FreeSpace: freeBytes, + }, nil +} diff --git a/drivers/quark_uc_tv/driver.go b/drivers/quark_uc_tv/driver.go index 038f768fe..dc062d9ff 100644 --- a/drivers/quark_uc_tv/driver.go +++ b/drivers/quark_uc_tv/driver.go @@ -95,14 +95,22 @@ func (d *QuarkUCTV) List(ctx context.Context, dir model.Obj, args model.ListArgs files := make([]model.Obj, 0) pageIndex := int64(0) pageSize := int64(100) + desc := "1" + orderBy := "3" + if d.OrderDirection == "asc" { + desc = "0" + } + if d.OrderBy == "file_name" { + orderBy = "1" + } for { var filesData FilesData _, err := d.request(ctx, "/file", http.MethodGet, func(req *resty.Request) { req.SetQueryParams(map[string]string{ "method": "list", "parent_fid": dir.GetID(), - "order_by": "3", - "desc": "1", + "order_by": orderBy, + "desc": desc, "category": "", "source": "", "ex_source": "", diff --git a/drivers/quark_uc_tv/meta.go b/drivers/quark_uc_tv/meta.go index 558d121af..c1e871dd8 100644 --- a/drivers/quark_uc_tv/meta.go +++ b/drivers/quark_uc_tv/meta.go @@ -8,6 +8,8 @@ import ( type Addition struct { // Usually one of two driver.RootID + OrderBy string `json:"order_by" type:"select" options:"file_name,updated_at" default:"updated_at"` + OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" default:"desc"` // define 
other RefreshToken string `json:"refresh_token" required:"false" default:""` // 必要且影响登录,由签名决定 diff --git a/drivers/sftp/driver.go b/drivers/sftp/driver.go index 7de242485..17db40380 100644 --- a/drivers/sftp/driver.go +++ b/drivers/sftp/driver.go @@ -4,6 +4,7 @@ import ( "context" "os" "path" + "strings" "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/errs" @@ -127,4 +128,22 @@ func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStrea return err } +func (d *SFTP) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + stat, err := d.client.StatVFS(d.RootFolderPath) + if err != nil { + if strings.Contains(err.Error(), "unimplemented") { + return nil, errs.NotImplement + } + return nil, err + } + total := stat.Blocks * stat.Bsize + free := stat.Bfree * stat.Bsize + return &model.StorageDetails{ + DiskUsage: model.DiskUsage{ + TotalSpace: total, + FreeSpace: free, + }, + }, nil +} + var _ driver.Driver = (*SFTP)(nil) diff --git a/drivers/smb/driver.go b/drivers/smb/driver.go index 3e12f1220..910394ccd 100644 --- a/drivers/smb/driver.go +++ b/drivers/smb/driver.go @@ -205,6 +205,22 @@ func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStream return nil } +func (d *SMB) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + if err := d.checkConn(); err != nil { + return nil, err + } + stat, err := d.fs.Statfs(d.RootFolderPath) + if err != nil { + return nil, err + } + return &model.StorageDetails{ + DiskUsage: model.DiskUsage{ + TotalSpace: stat.BlockSize() * stat.TotalBlockCount(), + FreeSpace: stat.BlockSize() * stat.AvailableBlockCount(), + }, + }, nil +} + //func (d *SMB) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/template/driver.go b/drivers/template/driver.go index 5587dfea7..477ca7f7e 100644 --- a/drivers/template/driver.go +++ b/drivers/template/driver.go @@ 
-93,6 +93,11 @@ func (d *Template) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.O return nil, errs.NotImplement } +func (d *Template) GetDetails(ctx context.Context) (*model.StorageDetails, error) { + // TODO return storage details (total space, free space, etc.) + return nil, errs.NotImplement +} + //func (d *Template) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/internal/archive/archives/archives.go b/internal/archive/archives/archives.go index d9c59aa9f..6f6245a8b 100644 --- a/internal/archive/archives/archives.go +++ b/internal/archive/archives/archives.go @@ -1,10 +1,11 @@ package archives import ( + "fmt" "io" "io/fs" "os" - stdpath "path" + "path/filepath" "strings" "github.com/OpenListTeam/OpenList/v4/internal/archive/tool" @@ -107,7 +108,7 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args } if stat.IsDir() { isDir = true - outputPath = stdpath.Join(outputPath, stat.Name()) + outputPath = filepath.Join(outputPath, stat.Name()) err = os.Mkdir(outputPath, 0700) if err != nil { return filterPassword(err) @@ -120,11 +121,14 @@ func (Archives) Decompress(ss []*stream.SeekableStream, outputPath string, args return err } relPath := strings.TrimPrefix(p, path+"/") - dstPath := stdpath.Join(outputPath, relPath) + dstPath := filepath.Join(outputPath, relPath) + if !strings.HasPrefix(dstPath, outputPath+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", relPath) + } if d.IsDir() { err = os.MkdirAll(dstPath, 0700) } else { - dir := stdpath.Dir(dstPath) + dir := filepath.Dir(dstPath) err = decompress(fsys, p, dir, func(_ float64) {}) } return err diff --git a/internal/archive/archives/utils.go b/internal/archive/archives/utils.go index ce84da093..ddead84c9 100644 --- a/internal/archive/archives/utils.go +++ b/internal/archive/archives/utils.go @@ -1,10 +1,11 @@ package archives import ( + "fmt" "io" fs2 "io/fs" "os" - stdpath 
"path" + "path/filepath" "strings" "github.com/OpenListTeam/OpenList/v4/internal/errs" @@ -69,7 +70,11 @@ func decompress(fsys fs2.FS, filePath, targetPath string, up model.UpdateProgres if err != nil { return err } - f, err := os.OpenFile(stdpath.Join(targetPath, stat.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + destPath := filepath.Join(targetPath, stat.Name()) + if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", stat.Name()) + } + f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } diff --git a/internal/archive/iso9660/iso9660.go b/internal/archive/iso9660/iso9660.go index eb4e975d1..7b26dcaee 100644 --- a/internal/archive/iso9660/iso9660.go +++ b/internal/archive/iso9660/iso9660.go @@ -1,9 +1,11 @@ package iso9660 import ( + "fmt" "io" "os" - stdpath "path" + "path/filepath" + "strings" "github.com/OpenListTeam/OpenList/v4/internal/archive/tool" "github.com/OpenListTeam/OpenList/v4/internal/errs" @@ -79,7 +81,11 @@ func (ISO9660) Decompress(ss []*stream.SeekableStream, outputPath string, args m } if obj.IsDir() { if args.InnerPath != "/" { - outputPath = stdpath.Join(outputPath, obj.Name()) + rootpath := outputPath + outputPath = filepath.Join(outputPath, obj.Name()) + if !strings.HasPrefix(outputPath, rootpath+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", obj.Name()) + } if err = os.MkdirAll(outputPath, 0700); err != nil { return err } diff --git a/internal/archive/iso9660/utils.go b/internal/archive/iso9660/utils.go index e3326b9b9..0e9151332 100644 --- a/internal/archive/iso9660/utils.go +++ b/internal/archive/iso9660/utils.go @@ -1,8 +1,9 @@ package iso9660 import ( + "fmt" "os" - stdpath "path" + "path/filepath" "strings" "github.com/OpenListTeam/OpenList/v4/internal/errs" @@ -62,7 +63,11 @@ func toModelObj(file *iso9660.File) model.Obj { } func decompress(f *iso9660.File, path string, up model.UpdateProgress) 
error { - file, err := os.OpenFile(stdpath.Join(path, f.Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + destPath := filepath.Join(path, f.Name()) + if !strings.HasPrefix(destPath, path+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", f.Name()) + } + file, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } @@ -84,7 +89,10 @@ func decompressAll(children []*iso9660.File, path string) error { if err != nil { return err } - nextPath := stdpath.Join(path, child.Name()) + nextPath := filepath.Join(path, child.Name()) + if !strings.HasPrefix(nextPath, path+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", child.Name()) + } if err = os.MkdirAll(nextPath, 0700); err != nil { return err } diff --git a/internal/archive/rardecode/rardecode.go b/internal/archive/rardecode/rardecode.go index d2c6a448a..13a22e3e8 100644 --- a/internal/archive/rardecode/rardecode.go +++ b/internal/archive/rardecode/rardecode.go @@ -3,7 +3,7 @@ package rardecode import ( "io" "os" - stdpath "path" + "path/filepath" "strings" "github.com/OpenListTeam/OpenList/v4/internal/archive/tool" @@ -93,7 +93,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg } } else { innerPath := strings.TrimPrefix(args.InnerPath, "/") - innerBase := stdpath.Base(innerPath) + innerBase := filepath.Base(innerPath) createdBaseDir := false for { var header *rardecode.FileHeader @@ -115,7 +115,7 @@ func (RarDecoder) Decompress(ss []*stream.SeekableStream, outputPath string, arg } break } else if strings.HasPrefix(name, innerPath+"/") { - targetPath := stdpath.Join(outputPath, innerBase) + targetPath := filepath.Join(outputPath, innerBase) if !createdBaseDir { err = os.Mkdir(targetPath, 0700) if err != nil { diff --git a/internal/archive/rardecode/utils.go b/internal/archive/rardecode/utils.go index 93a71da9e..e933005a1 100644 --- a/internal/archive/rardecode/utils.go +++ 
b/internal/archive/rardecode/utils.go @@ -5,7 +5,8 @@ import ( + "fmt" "io" "io/fs" "os" - stdpath "path" + "path/filepath" "sort" "strings" "time" @@ -124,7 +124,7 @@ type WrapFileInfo struct { } func (f *WrapFileInfo) Name() string { - return stdpath.Base(f.File.Name) + return filepath.Base(f.File.Name) } func (f *WrapFileInfo) Size() int64 { @@ -183,12 +183,16 @@ func getReader(ss []*stream.SeekableStream, password string) (*rardecode.Reader, func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath, outputPath string) error { targetPath := outputPath - dir, base := stdpath.Split(filePath) + dir, base := filepath.Split(filePath) if dir != "" { - targetPath = stdpath.Join(targetPath, dir) - err := os.MkdirAll(targetPath, 0700) - if err != nil { - return err + targetPath = filepath.Join(targetPath, dir) + if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) { + err := os.MkdirAll(targetPath, 0700) + if err != nil { + return err + } + } else { + targetPath = outputPath } } if base != "" { @@ -201,7 +205,11 @@ func decompress(reader *rardecode.Reader, header *rardecode.FileHeader, filePath } func _decompress(reader *rardecode.Reader, header *rardecode.FileHeader, targetPath string, up model.UpdateProgress) error { - f, err := os.OpenFile(stdpath.Join(targetPath, stdpath.Base(header.Name)), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + destPath := filepath.Join(targetPath, filepath.Base(header.Name)) + if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", filepath.Base(header.Name)) + } + f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } diff --git a/internal/archive/tool/helper.go b/internal/archive/tool/helper.go index 6b5658a91..adbe56ed0 100644 --- a/internal/archive/tool/helper.go +++ b/internal/archive/tool/helper.go @@ -1,10 +1,11 @@ package tool import ( + "fmt" "io" "io/fs" "os" - stdpath "path" + "path/filepath"
"strings" "github.com/OpenListTeam/OpenList/v4/internal/model" @@ -40,13 +41,13 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree isNewFolder := false if !file.FileInfo().IsDir() { // 先将 文件 添加到 所在的文件夹 - dir = stdpath.Dir(name) + dir = filepath.Dir(name) dirObj = dirMap[dir] if dirObj == nil { isNewFolder = dir != "." dirObj = &model.ObjectTree{} dirObj.IsFolder = true - dirObj.Name = stdpath.Base(dir) + dirObj.Name = filepath.Base(dir) dirObj.Modified = file.FileInfo().ModTime() dirMap[dir] = dirObj } @@ -64,28 +65,28 @@ func GenerateMetaTreeFromFolderTraversal(r ArchiveReader) (bool, []model.ObjTree dirMap[dir] = dirObj } dirObj.IsFolder = true - dirObj.Name = stdpath.Base(dir) + dirObj.Name = filepath.Base(dir) dirObj.Modified = file.FileInfo().ModTime() } if isNewFolder { // 将 文件夹 添加到 父文件夹 // 考虑压缩包仅记录文件的路径,不记录文件夹 // 循环创建所有父文件夹 - parentDir := stdpath.Dir(dir) + parentDir := filepath.Dir(dir) for { parentDirObj := dirMap[parentDir] if parentDirObj == nil { parentDirObj = &model.ObjectTree{} if parentDir != "." 
{ parentDirObj.IsFolder = true - parentDirObj.Name = stdpath.Base(parentDir) + parentDirObj.Name = filepath.Base(parentDir) parentDirObj.Modified = file.FileInfo().ModTime() } dirMap[parentDir] = parentDirObj } parentDirObj.Children = append(parentDirObj.Children, dirObj) - parentDir = stdpath.Dir(parentDir) + parentDir = filepath.Dir(parentDir) if dirMap[parentDir] != nil { break } @@ -127,7 +128,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode } } else { innerPath := strings.TrimPrefix(args.InnerPath, "/") - innerBase := stdpath.Base(innerPath) + innerBase := filepath.Base(innerPath) createdBaseDir := false for _, file := range files { name := file.Name() @@ -138,7 +139,7 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode } break } else if strings.HasPrefix(name, innerPath+"/") { - targetPath := stdpath.Join(outputPath, innerBase) + targetPath := filepath.Join(outputPath, innerBase) if !createdBaseDir { err = os.Mkdir(targetPath, 0700) if err != nil { @@ -159,12 +160,16 @@ func DecompressFromFolderTraversal(r ArchiveReader, outputPath string, args mode func decompress(file SubFile, filePath, outputPath, password string) error { targetPath := outputPath - dir, base := stdpath.Split(filePath) + dir, base := filepath.Split(filePath) if dir != "" { - targetPath = stdpath.Join(targetPath, dir) - err := os.MkdirAll(targetPath, 0700) - if err != nil { - return err + targetPath = filepath.Join(targetPath, dir) + if strings.HasPrefix(targetPath, outputPath+string(os.PathSeparator)) { + err := os.MkdirAll(targetPath, 0700) + if err != nil { + return err + } + } else { + targetPath = outputPath } } if base != "" { @@ -185,7 +190,11 @@ func _decompress(file SubFile, targetPath, password string, up model.UpdateProgr return err } defer func() { _ = rc.Close() }() - f, err := os.OpenFile(stdpath.Join(targetPath, file.FileInfo().Name()), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + destPath := 
filepath.Join(targetPath, file.FileInfo().Name()) + if !strings.HasPrefix(destPath, targetPath+string(os.PathSeparator)) { + return fmt.Errorf("illegal file path: %s", file.FileInfo().Name()) + } + f, err := os.OpenFile(destPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) if err != nil { return err } diff --git a/internal/bootstrap/data/setting.go b/internal/bootstrap/data/setting.go index f00305119..7bbc1b6ef 100644 --- a/internal/bootstrap/data/setting.go +++ b/internal/bootstrap/data/setting.go @@ -100,7 +100,7 @@ func InitialSettings() []model.SettingItem { //{Key: conf.ApiUrl, Value: "", Type: conf.TypeString, Group: model.SITE}, //{Key: conf.BasePath, Value: "", Type: conf.TypeString, Group: model.SITE}, {Key: conf.SiteTitle, Value: "OpenList", Type: conf.TypeString, Group: model.SITE}, - {Key: conf.Announcement, Value: "### repo\nhttps://github.com/OpenListTeam/OpenList", Type: conf.TypeText, Group: model.SITE}, + {Key: conf.Announcement, Value: "Welcome to the OpenList project!\nFor the latest updates, to contribute code, or to submit suggestions and issues, please visit our [project repository](https://github.com/OpenListTeam/OpenList).", Type: conf.TypeText, Group: model.SITE}, {Key: "pagination_type", Value: "all", Type: conf.TypeSelect, Options: "all,pagination,load_more,auto_load_more", Group: model.SITE}, {Key: "default_page_size", Value: "30", Type: conf.TypeNumber, Group: model.SITE}, {Key: conf.AllowIndexed, Value: "false", Type: conf.TypeBool, Group: model.SITE}, @@ -114,6 +114,7 @@ func InitialSettings() []model.SettingItem { {Key: "share_icon", Value: "🎁", Type: conf.TypeString, Group: model.STYLE}, {Key: "home_container", Value: "max_980px", Type: conf.TypeSelect, Options: "max_980px,hope_container", Group: model.STYLE}, {Key: "settings_layout", Value: "list", Type: conf.TypeSelect, Options: "list,responsive", Group: model.STYLE}, + {Key: conf.HideStorageDetails, Value: "false", Type: conf.TypeBool, Group: model.STYLE, Flag: model.PRIVATE}, // 
preview settings {Key: conf.TextTypes, Value: "txt,htm,html,xml,java,properties,sql,js,md,json,conf,ini,vue,php,py,bat,gitignore,yml,go,sh,c,cpp,h,hpp,tsx,vtt,srt,ass,rs,lrc,strm", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, {Key: conf.AudioTypes, Value: "mp3,flac,ogg,m4a,wav,opus,wma", Type: conf.TypeText, Group: model.PREVIEW, Flag: model.PRIVATE}, diff --git a/internal/conf/const.go b/internal/conf/const.go index 9c883ed53..c506c9ca4 100644 --- a/internal/conf/const.go +++ b/internal/conf/const.go @@ -17,9 +17,10 @@ const ( AllowMounted = "allow_mounted" RobotsTxt = "robots_txt" - Logo = "logo" // multi-lines text, L1: light, EOL: dark - Favicon = "favicon" - MainColor = "main_color" + Logo = "logo" // multi-lines text, L1: light, EOL: dark + Favicon = "favicon" + MainColor = "main_color" + HideStorageDetails = "hide_storage_details" // preview TextTypes = "text_types" diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 2d0e2b137..e9ce8e108 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -229,6 +229,11 @@ type ArchiveDecompressResult interface { ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj, args model.ArchiveDecompressArgs) ([]model.Obj, error) } +type WithDetails interface { + // GetDetails get storage details (total space, free space, etc.) 
+ GetDetails(ctx context.Context) (*model.StorageDetails, error) +} + type Reference interface { InitReference(storage Driver) error } diff --git a/internal/fs/fs.go b/internal/fs/fs.go index 69ecd853b..77cca2cbf 100644 --- a/internal/fs/fs.go +++ b/internal/fs/fs.go @@ -20,8 +20,9 @@ import ( // then pass the actual path to the op package type ListArgs struct { - Refresh bool - NoLog bool + Refresh bool + NoLog bool + WithStorageDetails bool } func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) { @@ -36,11 +37,12 @@ func List(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) } type GetArgs struct { - NoLog bool + NoLog bool + WithStorageDetails bool } func Get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) { - res, err := get(ctx, path) + res, err := get(ctx, path, args) if err != nil { if !args.NoLog { log.Warnf("failed get %s: %s", path, err) diff --git a/internal/fs/get.go b/internal/fs/get.go index 2761322d3..4e91c5bde 100644 --- a/internal/fs/get.go +++ b/internal/fs/get.go @@ -11,11 +11,11 @@ import ( "github.com/pkg/errors" ) -func get(ctx context.Context, path string) (model.Obj, error) { +func get(ctx context.Context, path string, args *GetArgs) (model.Obj, error) { path = utils.FixAndCleanPath(path) // maybe a virtual file if path != "/" { - virtualFiles := op.GetStorageVirtualFilesByPath(stdpath.Dir(path)) + virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, stdpath.Dir(path), !args.WithStorageDetails) for _, f := range virtualFiles { if f.GetName() == stdpath.Base(path) { return f, nil diff --git a/internal/fs/list.go b/internal/fs/list.go index cfc132294..aa2f47f0b 100644 --- a/internal/fs/list.go +++ b/internal/fs/list.go @@ -15,7 +15,7 @@ import ( func list(ctx context.Context, path string, args *ListArgs) ([]model.Obj, error) { meta, _ := ctx.Value(conf.MetaKey).(*model.Meta) user, _ := ctx.Value(conf.UserKey).(*model.User) - virtualFiles := 
op.GetStorageVirtualFilesByPath(path) + virtualFiles := op.GetStorageVirtualFilesWithDetailsByPath(ctx, path, !args.WithStorageDetails) storage, actualPath, err := op.GetStorageAndActualPath(path) if err != nil && len(virtualFiles) == 0 { return nil, errors.WithMessage(err, "failed get storage") diff --git a/internal/model/obj.go b/internal/model/obj.go index 750dc2698..a30b531a2 100644 --- a/internal/model/obj.go +++ b/internal/model/obj.go @@ -80,6 +80,10 @@ type SetPath interface { SetPath(path string) } +type ObjWithProvider interface { + GetProvider() string +} + func SortFiles(objs []Obj, orderBy, orderDirection string) { if orderBy == "" { return @@ -166,6 +170,16 @@ func GetUrl(obj Obj) (url string, ok bool) { return url, false } +func GetProvider(obj Obj) (string, bool) { + if obj, ok := obj.(ObjWithProvider); ok { + return obj.GetProvider(), true + } + if unwrap, ok := obj.(ObjUnwrap); ok { + return GetProvider(unwrap.Unwrap()) + } + return "unknown", false +} + func GetRawObject(obj Obj) *Object { switch v := obj.(type) { case *ObjThumbURL: @@ -174,6 +188,8 @@ func GetRawObject(obj Obj) *Object { return &v.Object case *ObjectURL: return &v.Object + case *ObjectProvider: + return &v.Object case *Object: return v } diff --git a/internal/model/object.go b/internal/model/object.go index b69407465..8e5cdf047 100644 --- a/internal/model/object.go +++ b/internal/model/object.go @@ -99,3 +99,16 @@ type ObjThumbURL struct { Thumbnail Url } + +type Provider struct { + Provider string +} + +func (p Provider) GetProvider() string { + return p.Provider +} + +type ObjectProvider struct { + Object + Provider +} diff --git a/internal/model/storage.go b/internal/model/storage.go index 1f60667e6..8c754e0fe 100644 --- a/internal/model/storage.go +++ b/internal/model/storage.go @@ -55,3 +55,40 @@ func (p Proxy) Webdav302() bool { func (p Proxy) WebdavProxyURL() bool { return p.WebdavPolicy == "use_proxy_url" } + +type DiskUsage struct { + TotalSpace uint64 
`json:"total_space"` + FreeSpace uint64 `json:"free_space"` +} + +type StorageDetails struct { + DiskUsage +} + +type StorageDetailsWithName struct { + *StorageDetails + DriverName string `json:"driver_name"` +} + +type ObjWithStorageDetails interface { + GetStorageDetails() *StorageDetailsWithName +} + +type ObjStorageDetails struct { + Obj + StorageDetailsWithName +} + +func (o ObjStorageDetails) GetStorageDetails() *StorageDetailsWithName { + return &o.StorageDetailsWithName +} + +func GetStorageDetails(obj Obj) (*StorageDetailsWithName, bool) { + if obj, ok := obj.(ObjWithStorageDetails); ok { + return obj.GetStorageDetails(), true + } + if unwrap, ok := obj.(ObjUnwrap); ok { + return GetStorageDetails(unwrap.Unwrap()) + } + return nil, false +} diff --git a/internal/net/request.go b/internal/net/request.go index 399e01f37..1306bc549 100644 --- a/internal/net/request.go +++ b/internal/net/request.go @@ -125,7 +125,7 @@ type ConcurrencyLimit struct { Limit int // 需要大于0 } -var ErrExceedMaxConcurrency = ErrorHttpStatusCode(http.StatusTooManyRequests) +var ErrExceedMaxConcurrency = HttpStatusCodeError(http.StatusTooManyRequests) func (l *ConcurrencyLimit) sub() error { l._m.Lock() @@ -403,7 +403,7 @@ var errInfiniteRetry = errors.New("infinite retry") func (d *downloader) tryDownloadChunk(params *HttpRequestParams, ch *chunk) (int64, error) { resp, err := d.cfg.HttpClient(d.ctx, params) if err != nil { - statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode) + statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError) if !ok { return 0, err } diff --git a/internal/net/serve.go b/internal/net/serve.go index 1fd40b1c1..6ffe41204 100644 --- a/internal/net/serve.go +++ b/internal/net/serve.go @@ -114,7 +114,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time reader, err := RangeReadCloser.RangeRead(ctx, http_range.Range{Length: -1}) if err != nil { code = http.StatusRequestedRangeNotSatisfiable - if statusCode, ok := 
errors.Unwrap(err).(ErrorHttpStatusCode); ok { + if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok { code = int(statusCode) } http.Error(w, err.Error(), code) @@ -137,7 +137,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time sendContent, err = RangeReadCloser.RangeRead(ctx, ra) if err != nil { code = http.StatusRequestedRangeNotSatisfiable - if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok { + if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok { code = int(statusCode) } http.Error(w, err.Error(), code) @@ -199,7 +199,7 @@ func ServeHTTP(w http.ResponseWriter, r *http.Request, name string, modTime time log.Warnf("Maybe size incorrect or reader not giving correct/full data, or connection closed before finish. written bytes: %d ,sendSize:%d, ", written, sendSize) } code = http.StatusInternalServerError - if statusCode, ok := errors.Unwrap(err).(ErrorHttpStatusCode); ok { + if statusCode, ok := errors.Unwrap(err).(HttpStatusCodeError); ok { code = int(statusCode) } w.WriteHeader(code) @@ -253,14 +253,14 @@ func RequestHttp(ctx context.Context, httpMethod string, headerOverride http.Hea _ = res.Body.Close() msg := string(all) log.Debugln(msg) - return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, ErrorHttpStatusCode(res.StatusCode), msg) + return nil, fmt.Errorf("http request [%s] failure,status: %w response:%s", URL, HttpStatusCodeError(res.StatusCode), msg) } return res, nil } -type ErrorHttpStatusCode int +type HttpStatusCodeError int -func (e ErrorHttpStatusCode) Error() string { +func (e HttpStatusCodeError) Error() string { return fmt.Sprintf("%d|%s", e, http.StatusText(int(e))) } diff --git a/internal/op/archive.go b/internal/op/archive.go index 964e93970..4d85d2064 100644 --- a/internal/op/archive.go +++ b/internal/op/archive.go @@ -405,11 +405,8 @@ func DriverExtract(ctx context.Context, storage driver.Driver, path string, args return nil }) link, err, 
_ := extractG.Do(key, fn) - if err == nil && !link.AcquireReference() { + for err == nil && !link.AcquireReference() { link, err, _ = extractG.Do(key, fn) - if err == nil { - link.AcquireReference() - } } if err == errLinkMFileCache { if linkM != nil { diff --git a/internal/op/fs.go b/internal/op/fs.go index 114c26fcc..2f3be94bc 100644 --- a/internal/op/fs.go +++ b/internal/op/fs.go @@ -184,6 +184,9 @@ func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, er if err == nil { return model.WrapObjName(obj), nil } + if !errs.IsNotImplement(err) { + return nil, errors.WithMessage(err, "failed to get obj") + } } // is root folder @@ -327,11 +330,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li return nil }) link, err, _ := linkG.Do(key, fn) - if err == nil && !link.AcquireReference() { + for err == nil && !link.AcquireReference() { link, err, _ = linkG.Do(key, fn) - if err == nil { - link.AcquireReference() - } } if err == errLinkMFileCache { @@ -630,6 +630,11 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod up = func(p float64) {} } + // 如果小于0,则通过缓存获取完整大小,可能发生于流式上传 + if file.GetSize() < 0 { + log.Warnf("file size < 0, try to get full size from cache") + file.CacheFullAndWriter(nil, nil) + } switch s := storage.(type) { case driver.PutResult: var newObj model.Obj diff --git a/internal/op/storage.go b/internal/op/storage.go index f24a098d1..b4daff62f 100644 --- a/internal/op/storage.go +++ b/internal/op/storage.go @@ -15,7 +15,6 @@ import ( "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/pkg/generic_sync" "github.com/OpenListTeam/OpenList/v4/pkg/utils" - mapset "github.com/deckarep/golang-set/v2" "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -335,6 +334,40 @@ func getStoragesByPath(path string) []driver.Driver { // for example, there are: /a/b,/a/c,/a/d/e,/a/b.balance1,/av // GetStorageVirtualFilesByPath(/a) => b,c,d 
func GetStorageVirtualFilesByPath(prefix string) []model.Obj { + return getStorageVirtualFilesByPath(prefix, func(_ driver.Driver, obj model.Obj) model.Obj { + return obj + }) +} + +func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string, hideDetails ...bool) []model.Obj { + if utils.IsBool(hideDetails...) { + return GetStorageVirtualFilesByPath(prefix) + } + return getStorageVirtualFilesByPath(prefix, func(d driver.Driver, obj model.Obj) model.Obj { + ret := &model.ObjStorageDetails{ + Obj: obj, + StorageDetailsWithName: model.StorageDetailsWithName{ + StorageDetails: nil, + DriverName: d.Config().Name, + }, + } + storage, ok := d.(driver.WithDetails) + if !ok { + return ret + } + details, err := storage.GetDetails(ctx) + if err != nil { + if !errors.Is(err, errs.NotImplement) { + log.Errorf("failed get %s storage details: %+v", d.GetStorage().MountPath, err) + } + return ret + } + ret.StorageDetails = details + return ret + }) +} + +func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver, model.Obj) model.Obj) []model.Obj { files := make([]model.Obj, 0) storages := storagesMap.Values() sort.Slice(storages, func(i, j int) bool { @@ -345,21 +378,30 @@ func GetStorageVirtualFilesByPath(prefix string) []model.Obj { }) prefix = utils.FixAndCleanPath(prefix) - set := mapset.NewSet[string]() + set := make(map[string]int) for _, v := range storages { mountPath := utils.GetActualMountPath(v.GetStorage().MountPath) // Exclude prefix itself and non prefix if len(prefix) >= len(mountPath) || !utils.IsSubPath(prefix, mountPath) { continue } - name := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2)[0] - if set.Add(name) { - files = append(files, &model.Object{ - Name: name, + names := strings.SplitN(strings.TrimPrefix(mountPath[len(prefix):], "/"), "/", 2) + idx, ok := set[names[0]] + if !ok { + set[names[0]] = len(files) + obj := &model.Object{ + Name: names[0], Size: 0, Modified: 
v.GetStorage().Modified, IsFolder: true, - }) + } + if len(names) == 1 { + files = append(files, rootCallback(v, obj)) + } else { + files = append(files, obj) + } + } else if len(names) == 1 { + files[idx] = rootCallback(v, files[idx]) } } return files diff --git a/internal/stream/stream.go b/internal/stream/stream.go index 947727612..8d2f504fd 100644 --- a/internal/stream/stream.go +++ b/internal/stream/stream.go @@ -137,6 +137,60 @@ func (f *FileStream) CacheFullAndWriter(up *model.UpdateProgress, writer io.Writ if writer != nil { reader = io.TeeReader(reader, writer) } + + if f.GetSize() < 0 { + if f.peekBuff == nil { + f.peekBuff = &buffer.Reader{} + } + // 检查是否有数据 + buf := []byte{0} + n, err := io.ReadFull(reader, buf) + if n > 0 { + f.peekBuff.Append(buf[:n]) + } + if err == io.ErrUnexpectedEOF { + f.size = f.peekBuff.Size() + f.Reader = f.peekBuff + return f.peekBuff, nil + } else if err != nil { + return nil, err + } + if conf.MaxBufferLimit-n > conf.MmapThreshold && conf.MmapThreshold > 0 { + m, err := mmap.Alloc(conf.MaxBufferLimit - n) + if err == nil { + f.Add(utils.CloseFunc(func() error { + return mmap.Free(m) + })) + n, err = io.ReadFull(reader, m) + if n > 0 { + f.peekBuff.Append(m[:n]) + } + if err == io.ErrUnexpectedEOF { + f.size = f.peekBuff.Size() + f.Reader = f.peekBuff + return f.peekBuff, nil + } else if err != nil { + return nil, err + } + } + } + + tmpF, err := utils.CreateTempFile(reader, 0) + if err != nil { + return nil, err + } + f.Add(utils.CloseFunc(func() error { + return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name())) + })) + peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF) + if err != nil { + return nil, err + } + f.size = peekF.Size() + f.Reader = peekF + return peekF, nil + } + f.Reader = reader return f.cache(f.GetSize()) } @@ -162,7 +216,7 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) { } size := httpRange.Start + httpRange.Length - if f.peekBuff != nil && size <= 
int64(f.peekBuff.Len()) { + if f.peekBuff != nil && size <= int64(f.peekBuff.Size()) { return io.NewSectionReader(f.peekBuff, httpRange.Start, httpRange.Length), nil } @@ -194,7 +248,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) { f.peekBuff = &buffer.Reader{} f.oriReader = f.Reader } - bufSize := maxCacheSize - int64(f.peekBuff.Len()) + bufSize := maxCacheSize - int64(f.peekBuff.Size()) var buf []byte if conf.MmapThreshold > 0 && bufSize >= int64(conf.MmapThreshold) { m, err := mmap.Alloc(int(bufSize)) @@ -213,7 +267,7 @@ func (f *FileStream) cache(maxCacheSize int64) (model.File, error) { return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", bufSize, n, err) } f.peekBuff.Append(buf) - if int64(f.peekBuff.Len()) >= f.GetSize() { + if int64(f.peekBuff.Size()) >= f.GetSize() { f.Reader = f.peekBuff f.oriReader = nil } else { diff --git a/internal/stream/util.go b/internal/stream/util.go index 20cb4be07..4f51a46d2 100644 --- a/internal/stream/util.go +++ b/internal/stream/util.go @@ -77,7 +77,7 @@ func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, response, err := net.RequestHttp(ctx, "GET", header, link.URL) if err != nil { - if _, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok { + if _, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { return nil, err } return nil, fmt.Errorf("http request failure, err:%w", err) diff --git a/pkg/buffer/bytes.go b/pkg/buffer/bytes.go index 3ee107478..3e6cb5405 100644 --- a/pkg/buffer/bytes.go +++ b/pkg/buffer/bytes.go @@ -8,83 +8,86 @@ import ( // 用于存储不复用的[]byte type Reader struct { bufs [][]byte - length int - offset int + size int64 + offset int64 } -func (r *Reader) Len() int { - return r.length +func (r *Reader) Size() int64 { + return r.size } func (r *Reader) Append(buf []byte) { - r.length += len(buf) + r.size += int64(len(buf)) r.bufs = append(r.bufs, buf) } func (r *Reader) Read(p []byte) (int, error) { - n, err := r.ReadAt(p, 
int64(r.offset)) + n, err := r.ReadAt(p, r.offset) if n > 0 { - r.offset += n + r.offset += int64(n) } return n, err } func (r *Reader) ReadAt(p []byte, off int64) (int, error) { - if off < 0 || off >= int64(r.length) { + if off < 0 || off >= r.size { return 0, io.EOF } - n, length := 0, int64(0) + n := 0 readFrom := false for _, buf := range r.bufs { - newLength := length + int64(len(buf)) if readFrom { - w := copy(p[n:], buf) - n += w - } else if off < newLength { + nn := copy(p[n:], buf) + n += nn + if n == len(p) { + return n, nil + } + } else if newOff := off - int64(len(buf)); newOff >= 0 { + off = newOff + } else { + nn := copy(p, buf[off:]) + if nn == len(p) { + return nn, nil + } + n += nn readFrom = true - w := copy(p[n:], buf[int(off-length):]) - n += w } - if n == len(p) { - return n, nil - } - length = newLength } return n, io.EOF } func (r *Reader) Seek(offset int64, whence int) (int64, error) { - var abs int switch whence { case io.SeekStart: - abs = int(offset) case io.SeekCurrent: - abs = r.offset + int(offset) + offset = r.offset + offset case io.SeekEnd: - abs = r.length + int(offset) + offset = r.size + offset default: return 0, errors.New("Seek: invalid whence") } - if abs < 0 || abs > r.length { + if offset < 0 || offset > r.size { return 0, errors.New("Seek: invalid offset") } - r.offset = abs - return int64(abs), nil + r.offset = offset + return offset, nil } func (r *Reader) Reset() { clear(r.bufs) r.bufs = nil - r.length = 0 + r.size = 0 r.offset = 0 } func NewReader(buf ...[]byte) *Reader { - b := &Reader{} + b := &Reader{ + bufs: make([][]byte, 0, len(buf)), + } for _, b1 := range buf { b.Append(b1) } diff --git a/pkg/buffer/bytes_test.go b/pkg/buffer/bytes_test.go index b66af229c..3f4d85563 100644 --- a/pkg/buffer/bytes_test.go +++ b/pkg/buffer/bytes_test.go @@ -13,8 +13,7 @@ func TestReader_ReadAt(t *testing.T) { } bs := &Reader{} bs.Append([]byte("github.com")) - bs.Append([]byte("/")) - bs.Append([]byte("OpenList")) + 
bs.Append([]byte("/OpenList")) bs.Append([]byte("Team/")) bs.Append([]byte("OpenList")) tests := []struct { @@ -71,7 +70,7 @@ func TestReader_ReadAt(t *testing.T) { off: 24, }, want: func(a args, n int, err error) error { - if n != bs.Len()-int(a.off) { + if n != int(bs.Size()-a.off) { return errors.New("read length not match") } if string(a.p[:n]) != "OpenList" { diff --git a/pkg/buffer/file.go b/pkg/buffer/file.go new file mode 100644 index 000000000..48edf5a4c --- /dev/null +++ b/pkg/buffer/file.go @@ -0,0 +1,88 @@ +package buffer + +import ( + "errors" + "io" + "os" +) + +type PeekFile struct { + peek *Reader + file *os.File + offset int64 + size int64 +} + +func (p *PeekFile) Read(b []byte) (n int, err error) { + n, err = p.ReadAt(b, p.offset) + if n > 0 { + p.offset += int64(n) + } + return n, err +} + +func (p *PeekFile) ReadAt(b []byte, off int64) (n int, err error) { + if off < p.peek.Size() { + n, err = p.peek.ReadAt(b, off) + if err == nil || n == len(b) { + return n, nil + } + // EOF + } + var nn int + nn, err = p.file.ReadAt(b[n:], off+int64(n)-p.peek.Size()) + return n + nn, err +} + +func (p *PeekFile) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + case io.SeekCurrent: + if offset == 0 { + return p.offset, nil + } + offset = p.offset + offset + case io.SeekEnd: + offset = p.size + offset + default: + return 0, errors.New("Seek: invalid whence") + } + + if offset < 0 || offset > p.size { + return 0, errors.New("Seek: invalid offset") + } + if offset <= p.peek.Size() { + _, err := p.peek.Seek(offset, io.SeekStart) + if err != nil { + return 0, err + } + _, err = p.file.Seek(0, io.SeekStart) + if err != nil { + return 0, err + } + } else { + _, err := p.peek.Seek(p.peek.Size(), io.SeekStart) + if err != nil { + return 0, err + } + _, err = p.file.Seek(offset-p.peek.Size(), io.SeekStart) + if err != nil { + return 0, err + } + } + + p.offset = offset + return offset, nil +} + +func (p *PeekFile) Size() int64 { + 
return p.size +} + +func NewPeekFile(peek *Reader, file *os.File) (*PeekFile, error) { + stat, err := file.Stat() + if err == nil { + return &PeekFile{peek: peek, file: file, size: stat.Size() + peek.Size()}, nil + } + return nil, err +} diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go index 0b70e4e1f..596e61e54 100644 --- a/pkg/utils/hash.go +++ b/pkg/utils/hash.go @@ -57,6 +57,11 @@ var ( Supported []*HashType ) +func GetHashByName(name string) (ht *HashType, ok bool) { + ht, ok = name2hash[name] + return +} + // RegisterHash adds a new Hash to the list and returns its Type func RegisterHash(name, alias string, width int, newFunc func() hash.Hash) *HashType { return RegisterHashWithParam(name, alias, width, func(a ...any) hash.Hash { return newFunc() }) diff --git a/pkg/utils/io.go b/pkg/utils/io.go index 172dc41c0..7ce6a9125 100644 --- a/pkg/utils/io.go +++ b/pkg/utils/io.go @@ -200,26 +200,37 @@ type SyncClosers struct { var _ SyncClosersIF = (*SyncClosers)(nil) func (c *SyncClosers) AcquireReference() bool { - ref := atomic.AddInt32(&c.ref, 1) - if ref > 0 { - // log.Debugf("SyncClosers.AcquireReference %p,ref=%d\n", c, ref) - return true + for { + ref := atomic.LoadInt32(&c.ref) + if ref < 0 { + return false + } + newRef := ref + 1 + if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) { + log.Debugf("AcquireReference %p: %d", c, newRef) + return true + } } - atomic.StoreInt32(&c.ref, math.MinInt16) - return false } func (c *SyncClosers) Close() error { - ref := atomic.AddInt32(&c.ref, -1) - if ref < -1 { - atomic.StoreInt32(&c.ref, math.MinInt16) - return nil - } - // log.Debugf("SyncClosers.Close %p,ref=%d\n", c, ref+1) - if ref > 0 { - return nil + for { + ref := atomic.LoadInt32(&c.ref) + if ref < 0 { + return nil + } + newRef := ref - 1 + if newRef <= 0 { + newRef = math.MinInt16 + } + if atomic.CompareAndSwapInt32(&c.ref, ref, newRef) { + log.Debugf("Close %p: %d", c, ref) + if newRef > 0 { + return nil + } + break + } } - atomic.StoreInt32(&c.ref, 
math.MinInt16) var errs []error for _, closer := range c.closers { diff --git a/server/handles/down.go b/server/handles/down.go index 62008c068..84ebdc44f 100644 --- a/server/handles/down.go +++ b/server/handles/down.go @@ -147,7 +147,7 @@ func proxy(c *gin.Context, link *model.Link, file model.Obj, proxyRange bool) { if Writer.IsWritten() { log.Errorf("%s %s local proxy error: %+v", c.Request.Method, c.Request.URL.Path, err) } else { - if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok { + if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { common.ErrorPage(c, err, int(statusCode), true) } else { common.ErrorPage(c, err, 500, true) diff --git a/server/handles/fsread.go b/server/handles/fsread.go index b9a4f09de..6665094cd 100644 --- a/server/handles/fsread.go +++ b/server/handles/fsread.go @@ -33,18 +33,19 @@ type DirReq struct { } type ObjResp struct { - Id string `json:"id"` - Path string `json:"path"` - Name string `json:"name"` - Size int64 `json:"size"` - IsDir bool `json:"is_dir"` - Modified time.Time `json:"modified"` - Created time.Time `json:"created"` - Sign string `json:"sign"` - Thumb string `json:"thumb"` - Type int `json:"type"` - HashInfoStr string `json:"hashinfo"` - HashInfo map[*utils.HashType]string `json:"hash_info"` + Id string `json:"id"` + Path string `json:"path"` + Name string `json:"name"` + Size int64 `json:"size"` + IsDir bool `json:"is_dir"` + Modified time.Time `json:"modified"` + Created time.Time `json:"created"` + Sign string `json:"sign"` + Thumb string `json:"thumb"` + Type int `json:"type"` + HashInfoStr string `json:"hashinfo"` + HashInfo map[*utils.HashType]string `json:"hash_info"` + MountDetails *model.StorageDetailsWithName `json:"mount_details,omitempty"` } type FsListResp struct { @@ -98,7 +99,10 @@ func FsList(c *gin.Context, req *ListReq, user *model.User) { common.ErrorStrResp(c, "Refresh without permission", 403) return } - objs, err := fs.List(c.Request.Context(), reqPath, 
&fs.ListArgs{Refresh: req.Refresh}) + objs, err := fs.List(c.Request.Context(), reqPath, &fs.ListArgs{ + Refresh: req.Refresh, + WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails), + }) if err != nil { common.ErrorResp(c, err, 500) return @@ -224,19 +228,21 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp { var resp []ObjResp for _, obj := range objs { thumb, _ := model.GetThumb(obj) + mountDetails, _ := model.GetStorageDetails(obj) resp = append(resp, ObjResp{ - Id: obj.GetID(), - Path: obj.GetPath(), - Name: obj.GetName(), - Size: obj.GetSize(), - IsDir: obj.IsDir(), - Modified: obj.ModTime(), - Created: obj.CreateTime(), - HashInfoStr: obj.GetHash().String(), - HashInfo: obj.GetHash().Export(), - Sign: common.Sign(obj, parent, encrypt), - Thumb: thumb, - Type: utils.GetObjType(obj.GetName(), obj.IsDir()), + Id: obj.GetID(), + Path: obj.GetPath(), + Name: obj.GetName(), + Size: obj.GetSize(), + IsDir: obj.IsDir(), + Modified: obj.ModTime(), + Created: obj.CreateTime(), + HashInfoStr: obj.GetHash().String(), + HashInfo: obj.GetHash().Export(), + Sign: common.Sign(obj, parent, encrypt), + Thumb: thumb, + Type: utils.GetObjType(obj.GetName(), obj.IsDir()), + MountDetails: mountDetails, }) } return resp @@ -293,7 +299,9 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) { common.ErrorStrResp(c, "password is incorrect or you have no permission", 403) return } - obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{}) + obj, err := fs.Get(c.Request.Context(), reqPath, &fs.GetArgs{ + WithStorageDetails: !user.IsGuest() && !setting.GetBool(conf.HideStorageDetails), + }) if err != nil { common.ErrorResp(c, err, 500) return @@ -301,8 +309,8 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) { var rawURL string storage, err := fs.GetStorage(reqPath, &fs.GetStoragesArgs{}) - provider := "unknown" - if err == nil { + provider, ok := model.GetProvider(obj) + if !ok && err == nil { provider 
= storage.Config().Name } if !obj.IsDir() { @@ -350,20 +358,22 @@ func FsGet(c *gin.Context, req *FsGetReq, user *model.User) { } parentMeta, _ := op.GetNearestMeta(parentPath) thumb, _ := model.GetThumb(obj) + mountDetails, _ := model.GetStorageDetails(obj) common.SuccessResp(c, FsGetResp{ ObjResp: ObjResp{ - Id: obj.GetID(), - Path: obj.GetPath(), - Name: obj.GetName(), - Size: obj.GetSize(), - IsDir: obj.IsDir(), - Modified: obj.ModTime(), - Created: obj.CreateTime(), - HashInfoStr: obj.GetHash().String(), - HashInfo: obj.GetHash().Export(), - Sign: common.Sign(obj, parentPath, isEncrypt(meta, reqPath)), - Type: utils.GetFileType(obj.GetName()), - Thumb: thumb, + Id: obj.GetID(), + Path: obj.GetPath(), + Name: obj.GetName(), + Size: obj.GetSize(), + IsDir: obj.IsDir(), + Modified: obj.ModTime(), + Created: obj.CreateTime(), + HashInfoStr: obj.GetHash().String(), + HashInfo: obj.GetHash().Export(), + Sign: common.Sign(obj, parentPath, isEncrypt(meta, reqPath)), + Type: utils.GetFileType(obj.GetName()), + Thumb: thumb, + MountDetails: mountDetails, }, RawURL: rawURL, Readme: getReadme(meta, reqPath), diff --git a/server/handles/fsup.go b/server/handles/fsup.go index 25a59e511..bddb1051e 100644 --- a/server/handles/fsup.go +++ b/server/handles/fsup.go @@ -59,14 +59,17 @@ func FsStream(c *gin.Context) { } } dir, name := stdpath.Split(path) - sizeStr := c.GetHeader("Content-Length") - if sizeStr == "" { - sizeStr = "0" - } - size, err := strconv.ParseInt(sizeStr, 10, 64) - if err != nil { - common.ErrorResp(c, err, 400) - return + // 如果请求头 Content-Length 和 X-File-Size 都没有,则 size=-1,表示未知大小的流式上传 + size := c.Request.ContentLength + if size < 0 { + sizeStr := c.GetHeader("X-File-Size") + if sizeStr != "" { + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + common.ErrorResp(c, err, 400) + return + } + } } h := make(map[*utils.HashType]string) if md5 := c.GetHeader("X-File-Md5"); md5 != "" { diff --git a/server/handles/storage.go 
b/server/handles/storage.go index 70b9e1ad0..c67bbcc0d 100644 --- a/server/handles/storage.go +++ b/server/handles/storage.go @@ -3,9 +3,11 @@ package handles import ( "context" "strconv" + "sync" "github.com/OpenListTeam/OpenList/v4/internal/conf" "github.com/OpenListTeam/OpenList/v4/internal/db" + "github.com/OpenListTeam/OpenList/v4/internal/driver" "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/op" "github.com/OpenListTeam/OpenList/v4/server/common" @@ -13,6 +15,42 @@ import ( log "github.com/sirupsen/logrus" ) +type StorageResp struct { + model.Storage + MountDetails *model.StorageDetails `json:"mount_details,omitempty"` +} + +func makeStorageResp(c *gin.Context, storages []model.Storage) []*StorageResp { + ret := make([]*StorageResp, len(storages)) + var wg sync.WaitGroup + for i, s := range storages { + ret[i] = &StorageResp{ + Storage: s, + MountDetails: nil, + } + d, err := op.GetStorageByMountPath(s.MountPath) + if err != nil { + continue + } + wd, ok := d.(driver.WithDetails) + if !ok { + continue + } + wg.Add(1) + go func() { + defer wg.Done() + details, err := wd.GetDetails(c) + if err != nil { + log.Errorf("failed get %s details: %+v", s.MountPath, err) + return + } + ret[i].MountDetails = details + }() + } + wg.Wait() + return ret +} + func ListStorages(c *gin.Context) { var req model.PageReq if err := c.ShouldBind(&req); err != nil { @@ -27,7 +65,7 @@ func ListStorages(c *gin.Context) { return } common.SuccessResp(c, common.PageResp{ - Content: storages, + Content: makeStorageResp(c, storages), Total: total, }) } diff --git a/server/webdav/webdav.go b/server/webdav/webdav.go index b6f7cdac8..0c4f0922c 100644 --- a/server/webdav/webdav.go +++ b/server/webdav/webdav.go @@ -14,6 +14,7 @@ import ( "net/url" "os" "path" + "strconv" "strings" "time" @@ -271,7 +272,7 @@ func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (sta } err = common.Proxy(w, r, link, fi) if err != nil { - 
if statusCode, ok := errors.Unwrap(err).(net.ErrorHttpStatusCode); ok { + if statusCode, ok := errors.Unwrap(err).(net.HttpStatusCodeError); ok { return int(statusCode), err } return http.StatusInternalServerError, fmt.Errorf("webdav proxy error: %+v", err) @@ -341,9 +342,19 @@ func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, if err != nil { return http.StatusForbidden, err } + size := r.ContentLength + if size < 0 { + sizeStr := r.Header.Get("X-File-Size") + if sizeStr != "" { + size, err = strconv.ParseInt(sizeStr, 10, 64) + if err != nil { + return http.StatusBadRequest, err + } + } + } obj := model.Object{ Name: path.Base(reqPath), - Size: r.ContentLength, + Size: size, Modified: h.getModTime(r), Ctime: h.getCreateTime(r), } From 5087eb9ef0b9d58b9f77f3a5baf9c7fc65a1f97b Mon Sep 17 00:00:00 2001 From: Suyunmeng Date: Tue, 23 Sep 2025 13:14:46 +0800 Subject: [PATCH 26/26] fix: remove duplicate method declarations --- drivers/123_open/driver.go | 15 --------------- drivers/baidu_netdisk/driver.go | 8 -------- drivers/baidu_netdisk/types.go | 2 ++ 3 files changed, 2 insertions(+), 23 deletions(-) diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go index 59da8d35e..fa279d5da 100644 --- a/drivers/123_open/driver.go +++ b/drivers/123_open/driver.go @@ -313,20 +313,5 @@ func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) }, nil } -func (d *Open123) GetDetails(ctx context.Context) (*model.StorageDetails, error) { - userInfo, err := d.getUserInfo() - if err != nil { - return nil, err - } - total := userInfo.Data.SpacePermanent + userInfo.Data.SpaceTemp - free := total - userInfo.Data.SpaceUsed - return &model.StorageDetails{ - DiskUsage: model.DiskUsage{ - TotalSpace: total, - FreeSpace: free, - }, - }, nil -} - var _ driver.Driver = (*Open123)(nil) var _ driver.PutResult = (*Open123)(nil) diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 7a9beda63..9c39fdcd1 
100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -468,12 +468,4 @@ func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, e return &model.StorageDetails{DiskUsage: *du}, nil } -func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) { - du, err := d.quota() - if err != nil { - return nil, err - } - return &model.StorageDetails{DiskUsage: *du}, nil -} - var _ driver.Driver = (*BaiduNetdisk)(nil) diff --git a/drivers/baidu_netdisk/types.go b/drivers/baidu_netdisk/types.go index 594de4863..187198d6f 100644 --- a/drivers/baidu_netdisk/types.go +++ b/drivers/baidu_netdisk/types.go @@ -234,6 +234,8 @@ type SliceUpCompleteResp struct { Ctime uint64 `json:"ctime"` // 文件创建时间 Mtime uint64 `json:"mtime"` // 文件修改时间 Isdir int `json:"isdir"` // 是否目录,0 文件、1 目录 +} + type QuotaResp struct { Errno int `json:"errno"` RequestId int64 `json:"request_id"`