From a3ea72ce18bdefe5a7e74135718694a660fad280 Mon Sep 17 00:00:00 2001 From: markrieder <160418822+markrieder@users.noreply.github.com> Date: Wed, 2 Oct 2024 11:00:36 +0200 Subject: [PATCH] drive update --- drivers/Mega | 1 - drivers/Mega cloud/driver.go.txt | 195 ++++++++++++++++++ drivers/Mega cloud/meta.go.txt | 28 +++ drivers/Mega cloud/types.go.txt | 48 +++++ drivers/Mega cloud/util.go.txt | 92 +++++++++ drivers/all.go | 13 -- drivers/ftp/driver.go.txt | 126 ++++++++++++ drivers/ftp/meta.go.txt | 44 ++++ drivers/ftp/types.go.txt | 1 + drivers/ftp/util.go.txt | 116 +++++++++++ drivers/s3/doge.go.txt | 63 ++++++ drivers/s3/driver.go.txt | 180 +++++++++++++++++ drivers/s3/meta.go.txt | 46 +++++ drivers/s3/types.go.txt | 1 + drivers/s3/util.go.txt | 257 ++++++++++++++++++++++++ drivers/sftp/driver.go.txt | 118 +++++++++++ drivers/sftp/meta.go.txt | 30 +++ drivers/sftp/types.go.txt | 53 +++++ drivers/sftp/util.go.txt | 96 +++++++++ drivers/smb/driver.go.txt | 200 ++++++++++++++++++ drivers/smb/meta.go.txt | 28 +++ drivers/smb/types.go.txt | 1 + drivers/smb/util.go.txt | 138 +++++++++++++ drivers/webdav/driver.go.txt | 106 ++++++++++ drivers/webdav/meta.go.txt | 28 +++ drivers/webdav/odrvcookie/cookie.go.txt | 46 +++++ drivers/webdav/odrvcookie/fetch.go.txt | 207 +++++++++++++++++++ drivers/webdav/types.go.txt | 1 + drivers/webdav/util.go.txt | 52 +++++ 29 files changed, 2301 insertions(+), 14 deletions(-) delete mode 100644 drivers/Mega create mode 100644 drivers/Mega cloud/driver.go.txt create mode 100644 drivers/Mega cloud/meta.go.txt create mode 100644 drivers/Mega cloud/types.go.txt create mode 100644 drivers/Mega cloud/util.go.txt delete mode 100644 drivers/all.go create mode 100644 drivers/ftp/driver.go.txt create mode 100644 drivers/ftp/meta.go.txt create mode 100644 drivers/ftp/types.go.txt create mode 100644 drivers/ftp/util.go.txt create mode 100644 drivers/s3/doge.go.txt create mode 100644 drivers/s3/driver.go.txt create mode 100644 drivers/s3/meta.go.txt create mode 100644 drivers/s3/types.go.txt create mode 100644 drivers/s3/util.go.txt create mode 100644 drivers/sftp/driver.go.txt create mode 100644 drivers/sftp/meta.go.txt create mode 100644 drivers/sftp/types.go.txt create mode 100644 drivers/sftp/util.go.txt create mode 100644 drivers/smb/driver.go.txt create mode 100644 drivers/smb/meta.go.txt create mode 100644 drivers/smb/types.go.txt create mode 100644 drivers/smb/util.go.txt create mode 100644 drivers/webdav/driver.go.txt create mode 100644 drivers/webdav/meta.go.txt create mode 100644 drivers/webdav/odrvcookie/cookie.go.txt create mode 100644 drivers/webdav/odrvcookie/fetch.go.txt create mode 100644 drivers/webdav/types.go.txt create mode 100644 drivers/webdav/util.go.txt diff --git a/drivers/Mega b/drivers/Mega deleted file mode 100644 index 8b13789..0000000 --- a/drivers/Mega +++ /dev/null @@ -1 +0,0 @@ - diff --git a/drivers/Mega cloud/driver.go.txt b/drivers/Mega cloud/driver.go.txt new file mode 100644 index 0000000..162aeef --- /dev/null +++ b/drivers/Mega cloud/driver.go.txt @@ -0,0 +1,195 @@ +package mega + +import ( + "context" + "errors" + "fmt" + "io" + "time" + + "github.com/alist-org/alist/v3/pkg/http_range" + "github.com/pquerna/otp/totp" + "github.com/rclone/rclone/lib/readers" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + log "github.com/sirupsen/logrus" + "github.com/t3rm1n4l/go-mega" +) + +type Mega 
struct {
+	model.Storage
+	Addition
+	c *mega.Mega
+}
+
+func (d *Mega) Config() driver.Config {
+	return config
+}
+
+func (d *Mega) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *Mega) Init(ctx context.Context) error {
+	var twoFACode = d.TwoFACode
+	d.c = mega.New()
+	if d.TwoFASecret != "" {
+		code, err := totp.GenerateCode(d.TwoFASecret, time.Now())
+		if err != nil {
+			return fmt.Errorf("generate totp code failed: %w", err)
+		}
+		twoFACode = code
+	}
+	return d.c.MultiFactorLogin(d.Email, d.Password, twoFACode)
+}
+
+func (d *Mega) Drop(ctx context.Context) error {
+	return nil
+}
+
+func (d *Mega) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	if node, ok := dir.(*MegaNode); ok {
+		nodes, err := d.c.FS.GetChildren(node.n)
+		if err != nil {
+			return nil, err
+		}
+		res := make([]model.Obj, 0)
+		for i := range nodes {
+			n := nodes[i]
+			if n.GetType() == mega.FILE || n.GetType() == mega.FOLDER {
+				res = append(res, &MegaNode{n})
+			}
+		}
+		return res, nil
+	}
+	log.Errorf("can't convert: %+v", dir)
+	return nil, fmt.Errorf("unable to convert dir to mega node")
+}
+
+func (d *Mega) GetRoot(ctx context.Context) (model.Obj, error) {
+	n := d.c.FS.GetRoot()
+	log.Debugf("mega root: %+v", *n)
+	return &MegaNode{n}, nil
+}
+
+func (d *Mega) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	if node, ok := file.(*MegaNode); ok {
+
+		//down, err := d.c.NewDownload(n.Node)
+		//if err != nil {
+		//	return nil, fmt.Errorf("open download file failed: %w", err)
+		//}
+
+		size := file.GetSize()
+		var finalClosers utils.Closers
+		resultRangeReader := func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+			length := httpRange.Length
+			if httpRange.Length >= 0 && httpRange.Start+httpRange.Length >= size {
+				length = -1
+			}
+			var down *mega.Download
+			err := utils.Retry(3, time.Second, func() (err error) {
+				down, err = d.c.NewDownload(node.n)
+				return err
+			})
+			if err != nil {
+				return nil, fmt.Errorf("open download file failed: %w", err)
+			}
+			oo := &openObject{
+				ctx:  ctx,
+				d:    down,
+				skip: httpRange.Start,
+			}
+			finalClosers.Add(oo)
+
+			return readers.NewLimitedReadCloser(oo, length), nil
+		}
+		resultRangeReadCloser := &model.RangeReadCloser{RangeReader: resultRangeReader, Closers: finalClosers}
+		resultLink := &model.Link{
+			RangeReadCloser: resultRangeReadCloser,
+		}
+		return resultLink, nil
+	}
+	return nil, fmt.Errorf("unable to convert file to mega node")
+}
+
+func (d *Mega) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	if parentNode, ok := parentDir.(*MegaNode); ok {
+		_, err := d.c.CreateDir(dirName, parentNode.n)
+		return err
+	}
+	return fmt.Errorf("unable to convert dir to mega node")
+}
+
+func (d *Mega) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
+	if srcNode, ok := srcObj.(*MegaNode); ok {
+		if dstNode, ok := dstDir.(*MegaNode); ok {
+			return d.c.Move(srcNode.n, dstNode.n)
+		}
+	}
+	return fmt.Errorf("unable to convert src or dst to mega node")
+}
+
+func (d *Mega) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
+	if srcNode, ok := srcObj.(*MegaNode); ok {
+		return d.c.Rename(srcNode.n, newName)
+	}
+	return fmt.Errorf("unable to convert src to mega node")
+}
+
+func (d *Mega) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
+	return errs.NotImplement
+}
+
+func (d *Mega) Remove(ctx context.Context, obj model.Obj) error {
+	if node, ok := obj.(*MegaNode); ok {
+		return d.c.Delete(node.n, false)
+	}
+	return 
fmt.Errorf("unable to convert dir to mega n") +} + +func (d *Mega) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + if dstNode, ok := dstDir.(*MegaNode); ok { + u, err := d.c.NewUpload(dstNode.n, stream.GetName(), stream.GetSize()) + if err != nil { + return err + } + + for id := 0; id < u.Chunks(); id++ { + if utils.IsCanceled(ctx) { + return ctx.Err() + } + _, chkSize, err := u.ChunkLocation(id) + if err != nil { + return err + } + chunk := make([]byte, chkSize) + n, err := io.ReadFull(stream, chunk) + if err != nil && err != io.EOF { + return err + } + if n != len(chunk) { + return errors.New("chunk too short") + } + + err = u.UploadChunk(id, chunk) + if err != nil { + return err + } + up(float64(id) * 100 / float64(u.Chunks())) + } + + _, err = u.Finish() + return err + } + return fmt.Errorf("unable to convert dir to mega n") +} + +//func (d *Mega) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { +// return nil, errs.NotSupport +//} + +var _ driver.Driver = (*Mega)(nil) diff --git a/drivers/Mega cloud/meta.go.txt b/drivers/Mega cloud/meta.go.txt new file mode 100644 index 0000000..d075863 --- /dev/null +++ b/drivers/Mega cloud/meta.go.txt @@ -0,0 +1,28 @@ +package mega + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + // Usually one of two + //driver.RootPath + //driver.RootID + Email string `json:"email" required:"true"` + Password string `json:"password" required:"true"` + TwoFACode string `json:"two_fa_code" required:"false" help:"2FA 6-digit code, filling in the 2FA code alone will not support reloading driver"` + TwoFASecret string `json:"two_fa_secret" required:"false" help:"2FA secret"` +} + +var config = driver.Config{ + Name: "Mega_nz", + LocalSort: true, + OnlyLocal: true, +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &Mega{} + }) +} diff --git a/drivers/Mega cloud/types.go.txt b/drivers/Mega cloud/types.go.txt new file mode 100644 index 0000000..3046d44 --- /dev/null +++ b/drivers/Mega cloud/types.go.txt @@ -0,0 +1,48 @@ +package mega + +import ( + "github.com/alist-org/alist/v3/pkg/utils" + "time" + + "github.com/alist-org/alist/v3/internal/model" + "github.com/t3rm1n4l/go-mega" +) + +type MegaNode struct { + n *mega.Node +} + +func (m *MegaNode) GetSize() int64 { + return m.n.GetSize() +} + +func (m *MegaNode) GetName() string { + return m.n.GetName() +} + +func (m *MegaNode) CreateTime() time.Time { + return m.n.GetTimeStamp() +} + +func (m *MegaNode) GetHash() utils.HashInfo { + //Meganz use md5, but can't get the original file hash, due to it's encrypted in the cloud + return utils.HashInfo{} +} + +func (m *MegaNode) ModTime() time.Time { + return m.n.GetTimeStamp() +} + +func (m *MegaNode) IsDir() bool { + return m.n.GetType() == mega.FOLDER || m.n.GetType() == mega.ROOT +} + +func (m *MegaNode) GetID() string { + return m.n.GetHash() +} + +func (m *MegaNode) GetPath() string { + return "" +} + +var _ model.Obj = (*MegaNode)(nil) diff --git a/drivers/Mega cloud/util.go.txt b/drivers/Mega cloud/util.go.txt new file mode 100644 index 0000000..f5ad254 --- /dev/null +++ b/drivers/Mega cloud/util.go.txt @@ -0,0 +1,92 @@ +package mega + +import ( + "context" + "fmt" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/t3rm1n4l/go-mega" + "io" + "sync" + "time" +) + +// do others that not defined in Driver interface +// openObject represents a download in progress +type 
openObject struct {
+	ctx    context.Context
+	mu     sync.Mutex
+	d      *mega.Download
+	id     int
+	skip   int64
+	chunk  []byte
+	closed bool
+}
+
+// getChunk fetches the next chunk of the download
+func (oo *openObject) getChunk(ctx context.Context) (err error) {
+	if oo.id >= oo.d.Chunks() {
+		return io.EOF
+	}
+	var chunk []byte
+	err = utils.Retry(3, time.Second, func() (err error) {
+		chunk, err = oo.d.DownloadChunk(oo.id)
+		return err
+	})
+	if err != nil {
+		return err
+	}
+	oo.id++
+	oo.chunk = chunk
+	return nil
+}
+
+// Read reads up to len(p) bytes into p.
+func (oo *openObject) Read(p []byte) (n int, err error) {
+	oo.mu.Lock()
+	defer oo.mu.Unlock()
+	if oo.closed {
+		return 0, fmt.Errorf("read on closed file")
+	}
+	// Skip data at the start if requested
+	for oo.skip > 0 {
+		_, size, err := oo.d.ChunkLocation(oo.id)
+		if err != nil {
+			return 0, err
+		}
+		if oo.skip < int64(size) {
+			break
+		}
+		oo.id++
+		oo.skip -= int64(size)
+	}
+	if len(oo.chunk) == 0 {
+		err = oo.getChunk(oo.ctx)
+		if err != nil {
+			return 0, err
+		}
+		if oo.skip > 0 {
+			oo.chunk = oo.chunk[oo.skip:]
+			oo.skip = 0
+		}
+	}
+	n = copy(p, oo.chunk)
+	oo.chunk = oo.chunk[n:]
+	return n, nil
+}
+
+// Close closes the file - MAC errors are reported here
+func (oo *openObject) Close() (err error) {
+	oo.mu.Lock()
+	defer oo.mu.Unlock()
+	if oo.closed {
+		return nil
+	}
+	err = utils.Retry(3, 500*time.Millisecond, func() (err error) {
+		return oo.d.Finish()
+	})
+	if err != nil {
+		return fmt.Errorf("failed to finish download: %w", err)
+	}
+	oo.closed = true
+	return nil
+}
diff --git a/drivers/all.go b/drivers/all.go
deleted file mode 100644
index 57a885f..0000000
--- a/drivers/all.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package drivers
-
-import (
-	_ "github.com/IceWhaleTech/CasaOS/drivers/dropbox"
-	_ "github.com/IceWhaleTech/CasaOS/drivers/google_drive"
-	_ "github.com/IceWhaleTech/CasaOS/drivers/onedrive"
-)
-
-// All do nothing,just for import
-// same as _ import
-func All() {
-
-}
diff --git a/drivers/ftp/driver.go.txt b/drivers/ftp/driver.go.txt
new file mode 100644
index 0000000..05b9e49
--- /dev/null
+++ b/drivers/ftp/driver.go.txt
@@ -0,0 +1,126 @@
+package ftp
+
+import (
+	"context"
+	stdpath "path"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/errs"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/jlaffaye/ftp"
+)
+
+type FTP struct {
+	model.Storage
+	Addition
+	conn *ftp.ServerConn
+}
+
+func (d *FTP) Config() driver.Config {
+	return config
+}
+
+func (d *FTP) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *FTP) Init(ctx context.Context) error {
+	return d.login()
+}
+
+func (d *FTP) Drop(ctx context.Context) error {
+	if d.conn != nil {
+		_ = d.conn.Logout()
+	}
+	return nil
+}
+
+func (d *FTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	if err := d.login(); err != nil {
+		return nil, err
+	}
+	entries, err := d.conn.List(encode(dir.GetPath(), d.Encoding))
+	if err != nil {
+		return nil, err
+	}
+	res := make([]model.Obj, 0)
+	for _, entry := range entries {
+		if entry.Name == "." || entry.Name == ".." 
{ + continue + } + f := model.Object{ + Name: decode(entry.Name, d.Encoding), + Size: int64(entry.Size), + Modified: entry.Time, + IsFolder: entry.Type == ftp.EntryTypeFolder, + } + res = append(res, &f) + } + return res, nil +} + +func (d *FTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + if err := d.login(); err != nil { + return nil, err + } + + r := NewFileReader(d.conn, encode(file.GetPath(), d.Encoding), file.GetSize()) + link := &model.Link{ + MFile: r, + } + return link, nil +} + +func (d *FTP) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if err := d.login(); err != nil { + return err + } + return d.conn.MakeDir(encode(stdpath.Join(parentDir.GetPath(), dirName), d.Encoding)) +} + +func (d *FTP) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if err := d.login(); err != nil { + return err + } + return d.conn.Rename( + encode(srcObj.GetPath(), d.Encoding), + encode(stdpath.Join(dstDir.GetPath(), srcObj.GetName()), d.Encoding), + ) +} + +func (d *FTP) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + if err := d.login(); err != nil { + return err + } + return d.conn.Rename( + encode(srcObj.GetPath(), d.Encoding), + encode(stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName), d.Encoding), + ) +} + +func (d *FTP) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return errs.NotSupport +} + +func (d *FTP) Remove(ctx context.Context, obj model.Obj) error { + if err := d.login(); err != nil { + return err + } + path := encode(obj.GetPath(), d.Encoding) + if obj.IsDir() { + return d.conn.RemoveDirRecur(path) + } else { + return d.conn.Delete(path) + } +} + +func (d *FTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + if err := d.login(); err != nil { + return err + } + // TODO: support cancel + path := stdpath.Join(dstDir.GetPath(), stream.GetName()) + return d.conn.Stor(encode(path, d.Encoding), stream) +} + +var _ driver.Driver = (*FTP)(nil) diff --git a/drivers/ftp/meta.go.txt b/drivers/ftp/meta.go.txt new file mode 100644 index 0000000..5652c12 --- /dev/null +++ b/drivers/ftp/meta.go.txt @@ -0,0 +1,44 @@ +package ftp + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" + "github.com/axgle/mahonia" +) + +func encode(str string, encoding string) string { + if encoding == "" { + return str + } + encoder := mahonia.NewEncoder(encoding) + return encoder.ConvertString(str) +} + +func decode(str string, encoding string) string { + if encoding == "" { + return str + } + decoder := mahonia.NewDecoder(encoding) + return decoder.ConvertString(str) +} + +type Addition struct { + Address string `json:"address" required:"true"` + Encoding string `json:"encoding" required:"true"` + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` + driver.RootPath +} + +var config = driver.Config{ + Name: "FTP", + LocalSort: true, + OnlyLocal: true, + DefaultRoot: "/", +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &FTP{} + }) +} diff --git a/drivers/ftp/types.go.txt b/drivers/ftp/types.go.txt new file mode 100644 index 0000000..4c98203 --- /dev/null +++ b/drivers/ftp/types.go.txt @@ -0,0 +1 @@ +package ftp diff --git a/drivers/ftp/util.go.txt b/drivers/ftp/util.go.txt new file mode 100644 index 0000000..196d874 --- /dev/null +++ b/drivers/ftp/util.go.txt @@ -0,0 +1,116 @@ +package ftp + +import ( + "io" + 
"os" + "sync" + "sync/atomic" + "time" + + "github.com/jlaffaye/ftp" +) + +// do others that not defined in Driver interface + +func (d *FTP) login() error { + if d.conn != nil { + _, err := d.conn.CurrentDir() + if err == nil { + return nil + } + } + conn, err := ftp.Dial(d.Address, ftp.DialWithShutTimeout(10*time.Second)) + if err != nil { + return err + } + err = conn.Login(d.Username, d.Password) + if err != nil { + return err + } + d.conn = conn + return nil +} + +// FileReader An FTP file reader that implements io.MFile for seeking. +type FileReader struct { + conn *ftp.ServerConn + resp *ftp.Response + offset atomic.Int64 + readAtOffset int64 + mu sync.Mutex + path string + size int64 +} + +func NewFileReader(conn *ftp.ServerConn, path string, size int64) *FileReader { + return &FileReader{ + conn: conn, + path: path, + size: size, + } +} + +func (r *FileReader) Read(buf []byte) (n int, err error) { + n, err = r.ReadAt(buf, r.offset.Load()) + r.offset.Add(int64(n)) + return +} + +func (r *FileReader) ReadAt(buf []byte, off int64) (n int, err error) { + if off < 0 { + return -1, os.ErrInvalid + } + r.mu.Lock() + defer r.mu.Unlock() + + if off != r.readAtOffset { + //have to restart the connection, to correct offset + _ = r.resp.Close() + r.resp = nil + } + + if r.resp == nil { + r.resp, err = r.conn.RetrFrom(r.path, uint64(off)) + r.readAtOffset = off + if err != nil { + return 0, err + } + } + + n, err = r.resp.Read(buf) + r.readAtOffset += int64(n) + return +} + +func (r *FileReader) Seek(offset int64, whence int) (int64, error) { + oldOffset := r.offset.Load() + var newOffset int64 + switch whence { + case io.SeekStart: + newOffset = offset + case io.SeekCurrent: + newOffset = oldOffset + offset + case io.SeekEnd: + return r.size, nil + default: + return -1, os.ErrInvalid + } + + if newOffset < 0 { + // offset out of range + return oldOffset, os.ErrInvalid + } + if newOffset == oldOffset { + // offset not changed, so return directly + return oldOffset, nil + } + r.offset.Store(newOffset) + return newOffset, nil +} + +func (r *FileReader) Close() error { + if r.resp != nil { + return r.resp.Close() + } + return nil +} diff --git a/drivers/s3/doge.go.txt b/drivers/s3/doge.go.txt new file mode 100644 index 0000000..12a584c --- /dev/null +++ b/drivers/s3/doge.go.txt @@ -0,0 +1,63 @@ +package s3 + +import ( + "crypto/hmac" + "crypto/sha1" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "strings" +) + +type TmpTokenResponse struct { + Code int `json:"code"` + Msg string `json:"msg"` + Data TmpTokenResponseData `json:"data,omitempty"` +} +type TmpTokenResponseData struct { + Credentials Credentials `json:"Credentials"` + ExpiredAt int `json:"ExpiredAt"` +} +type Credentials struct { + AccessKeyId string `json:"accessKeyId,omitempty"` + SecretAccessKey string `json:"secretAccessKey,omitempty"` + SessionToken string `json:"sessionToken,omitempty"` +} + +func getCredentials(AccessKey, SecretKey string) (rst Credentials, err error) { + apiPath := "/auth/tmp_token.json" + reqBody, err := json.Marshal(map[string]interface{}{"channel": "OSS_FULL", "scopes": []string{"*"}}) + if err != nil { + return rst, err + } + + signStr := apiPath + "\n" + string(reqBody) + hmacObj := hmac.New(sha1.New, []byte(SecretKey)) + hmacObj.Write([]byte(signStr)) + sign := hex.EncodeToString(hmacObj.Sum(nil)) + Authorization := "TOKEN " + AccessKey + ":" + sign + + req, err := http.NewRequest("POST", "https://api.dogecloud.com"+apiPath, strings.NewReader(string(reqBody))) + if err != nil { + return rst, err 
+	}
+	req.Header.Add("Content-Type", "application/json")
+	req.Header.Add("Authorization", Authorization)
+	client := http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		return rst, err
+	}
+	defer resp.Body.Close()
+	ret, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return rst, err
+	}
+	var tmpTokenResp TmpTokenResponse
+	err = json.Unmarshal(ret, &tmpTokenResp)
+	if err != nil {
+		return rst, err
+	}
+	return tmpTokenResp.Data.Credentials, nil
+}
diff --git a/drivers/s3/driver.go.txt b/drivers/s3/driver.go.txt
new file mode 100644
index 0000000..2b72d78
--- /dev/null
+++ b/drivers/s3/driver.go.txt
@@ -0,0 +1,180 @@
+package s3
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"github.com/alist-org/alist/v3/server/common"
+	"io"
+	"net/url"
+	stdpath "path"
+	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/internal/stream"
+	"github.com/alist-org/alist/v3/pkg/cron"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	log "github.com/sirupsen/logrus"
+)
+
+type S3 struct {
+	model.Storage
+	Addition
+	Session    *session.Session
+	client     *s3.S3
+	linkClient *s3.S3
+
+	config driver.Config
+	cron   *cron.Cron
+}
+
+func (d *S3) Config() driver.Config {
+	return d.config
+}
+
+func (d *S3) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *S3) Init(ctx context.Context) error {
+	if d.Region == "" {
+		d.Region = "alist"
+	}
+	if d.config.Name == "Doge" {
+		// DogeCloud temporary keys are only valid for 2 hours, so regenerate them every 118 minutes
+		d.cron = cron.NewCron(time.Minute * 118)
+		d.cron.Do(func() {
+			err := d.initSession()
+			if err != nil {
+				log.Errorln("Doge init session error:", err)
+			}
+			d.client = d.getClient(false)
+			d.linkClient = d.getClient(true)
+		})
+	}
+	err := d.initSession()
+	if err != nil {
+		return err
+	}
+	d.client = d.getClient(false)
+	d.linkClient = d.getClient(true)
+	return nil
+}
+
+func (d *S3) Drop(ctx context.Context) error {
+	if d.cron != nil {
+		d.cron.Stop()
+	}
+	return nil
+}
+
+func (d *S3) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	if d.ListObjectVersion == "v2" {
+		return d.listV2(dir.GetPath(), args)
+	}
+	return d.listV1(dir.GetPath(), args)
+}
+
+func (d *S3) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	path := getKey(file.GetPath(), false)
+	filename := stdpath.Base(path)
+	disposition := fmt.Sprintf(`attachment; filename*=UTF-8''%s`, url.PathEscape(filename))
+	if d.AddFilenameToDisposition {
+		disposition = fmt.Sprintf(`attachment; filename="%s"; filename*=UTF-8''%s`, filename, url.PathEscape(filename))
+	}
+	input := &s3.GetObjectInput{
+		Bucket: &d.Bucket,
+		Key:    &path,
+		//ResponseContentDisposition: &disposition,
+	}
+	if d.CustomHost == "" {
+		input.ResponseContentDisposition = &disposition
+	}
+	req, _ := d.linkClient.GetObjectRequest(input)
+	var link model.Link
+	var err error
+	if d.CustomHost != "" {
+		err = req.Build()
+		link.URL = req.HTTPRequest.URL.String()
+		if d.RemoveBucket {
+			link.URL = strings.Replace(link.URL, "/"+d.Bucket, "", 1)
+		}
+	} else {
+		if common.ShouldProxy(d, filename) {
+			err = req.Sign()
+			link.URL = req.HTTPRequest.URL.String()
+			link.Header = req.HTTPRequest.Header
+		} else {
+			link.URL, err = req.Presign(time.Hour * time.Duration(d.SignURLExpire))
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+	return &link, nil
+}
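+
+// To summarize, Link has three modes (the host and key below are
+// illustrative placeholders, not values from this patch):
+//
+//	CustomHost set: req.Build() -> https://cdn.example.com/dir/a.txt
+//	                (unsigned; "/bucket" trimmed when RemoveBucket is set)
+//	proxied:        req.Sign()  -> URL plus auth headers, served via the proxy
+//	default:        req.Presign(time.Hour * time.Duration(d.SignURLExpire))
+//	                -> time-limited presigned URL
+//
+func (d *S3) MakeDir(ctx 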
context.Context, parentDir model.Obj, dirName string) error { + return d.Put(ctx, &model.Object{ + Path: stdpath.Join(parentDir.GetPath(), dirName), + }, &stream.FileStream{ + Obj: &model.Object{ + Name: getPlaceholderName(d.Placeholder), + Modified: time.Now(), + }, + Reader: io.NopCloser(bytes.NewReader([]byte{})), + Mimetype: "application/octet-stream", + }, func(float64) {}) +} + +func (d *S3) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + err := d.Copy(ctx, srcObj, dstDir) + if err != nil { + return err + } + return d.Remove(ctx, srcObj) +} + +func (d *S3) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + err := d.copy(ctx, srcObj.GetPath(), stdpath.Join(stdpath.Dir(srcObj.GetPath()), newName), srcObj.IsDir()) + if err != nil { + return err + } + return d.Remove(ctx, srcObj) +} + +func (d *S3) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.copy(ctx, srcObj.GetPath(), stdpath.Join(dstDir.GetPath(), srcObj.GetName()), srcObj.IsDir()) +} + +func (d *S3) Remove(ctx context.Context, obj model.Obj) error { + if obj.IsDir() { + return d.removeDir(ctx, obj.GetPath()) + } + return d.removeFile(obj.GetPath()) +} + +func (d *S3) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + uploader := s3manager.NewUploader(d.Session) + if stream.GetSize() > s3manager.MaxUploadParts*s3manager.DefaultUploadPartSize { + uploader.PartSize = stream.GetSize() / (s3manager.MaxUploadParts - 1) + } + key := getKey(stdpath.Join(dstDir.GetPath(), stream.GetName()), false) + contentType := stream.GetMimetype() + log.Debugln("key:", key) + input := &s3manager.UploadInput{ + Bucket: &d.Bucket, + Key: &key, + Body: stream, + ContentType: &contentType, + } + _, err := uploader.UploadWithContext(ctx, input) + return err +} + +var _ driver.Driver = (*S3)(nil) diff --git a/drivers/s3/meta.go.txt b/drivers/s3/meta.go.txt new file mode 100644 index 0000000..4436c61 --- /dev/null +++ b/drivers/s3/meta.go.txt @@ -0,0 +1,46 @@ +package s3 + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + driver.RootPath + Bucket string `json:"bucket" required:"true"` + Endpoint string `json:"endpoint" required:"true"` + Region string `json:"region"` + AccessKeyID string `json:"access_key_id" required:"true"` + SecretAccessKey string `json:"secret_access_key" required:"true"` + SessionToken string `json:"session_token"` + CustomHost string `json:"custom_host"` + SignURLExpire int `json:"sign_url_expire" type:"number" default:"4"` + Placeholder string `json:"placeholder"` + ForcePathStyle bool `json:"force_path_style"` + ListObjectVersion string `json:"list_object_version" type:"select" options:"v1,v2" default:"v1"` + RemoveBucket bool `json:"remove_bucket" help:"Remove bucket name from path when using custom host."` + AddFilenameToDisposition bool `json:"add_filename_to_disposition" help:"Add filename to Content-Disposition header."` +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &S3{ + config: driver.Config{ + Name: "S3", + DefaultRoot: "/", + LocalSort: true, + CheckStatus: true, + }, + } + }) + op.RegisterDriver(func() driver.Driver { + return &S3{ + config: driver.Config{ + Name: "Doge", + DefaultRoot: "/", + LocalSort: true, + CheckStatus: true, + }, + } + }) +} diff --git a/drivers/s3/types.go.txt b/drivers/s3/types.go.txt new file mode 100644 index 0000000..3ed7f97 --- /dev/null +++ b/drivers/s3/types.go.txt @@ 
-0,0 +1 @@
+package s3
diff --git a/drivers/s3/util.go.txt b/drivers/s3/util.go.txt
new file mode 100644
index 0000000..31e658b
--- /dev/null
+++ b/drivers/s3/util.go.txt
@@ -0,0 +1,257 @@
+package s3
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"path"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/internal/op"
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	log "github.com/sirupsen/logrus"
+)
+
+// helpers that are not part of the Driver interface
+
+func (d *S3) initSession() error {
+	var err error
+	accessKeyID, secretAccessKey, sessionToken := d.AccessKeyID, d.SecretAccessKey, d.SessionToken
+	if d.config.Name == "Doge" {
+		credentialsTmp, err := getCredentials(d.AccessKeyID, d.SecretAccessKey)
+		if err != nil {
+			return err
+		}
+		accessKeyID, secretAccessKey, sessionToken = credentialsTmp.AccessKeyId, credentialsTmp.SecretAccessKey, credentialsTmp.SessionToken
+	}
+	cfg := &aws.Config{
+		Credentials:      credentials.NewStaticCredentials(accessKeyID, secretAccessKey, sessionToken),
+		Region:           &d.Region,
+		Endpoint:         &d.Endpoint,
+		S3ForcePathStyle: aws.Bool(d.ForcePathStyle),
+	}
+	d.Session, err = session.NewSession(cfg)
+	return err
+}
+
+func (d *S3) getClient(link bool) *s3.S3 {
+	client := s3.New(d.Session)
+	if link && d.CustomHost != "" {
+		client.Handlers.Build.PushBack(func(r *request.Request) {
+			if r.HTTPRequest.Method != http.MethodGet {
+				return
+			}
+			// check whether CustomHost starts with http:// or https://
+			split := strings.SplitN(d.CustomHost, "://", 2)
+			if utils.SliceContains([]string{"http", "https"}, split[0]) {
+				r.HTTPRequest.URL.Scheme = split[0]
+				r.HTTPRequest.URL.Host = split[1]
+			} else {
+				r.HTTPRequest.URL.Host = d.CustomHost
+			}
+		})
+	}
+	return client
+}
+
+func getKey(path string, dir bool) string {
+	path = strings.TrimPrefix(path, "/")
+	if path != "" && dir {
+		path += "/"
+	}
+	return path
+}
+
+var defaultPlaceholderName = ".alist"
+
+func getPlaceholderName(placeholder string) string {
+	if placeholder == "" {
+		return defaultPlaceholderName
+	}
+	return placeholder
+}
+
+func (d *S3) listV1(prefix string, args model.ListArgs) ([]model.Obj, error) {
+	prefix = getKey(prefix, true)
+	log.Debugf("list: %s", prefix)
+	files := make([]model.Obj, 0)
+	marker := ""
+	for {
+		input := &s3.ListObjectsInput{
+			Bucket:    &d.Bucket,
+			Marker:    &marker,
+			Prefix:    &prefix,
+			Delimiter: aws.String("/"),
+		}
+		listObjectsResult, err := d.client.ListObjects(input)
+		if err != nil {
+			return nil, err
+		}
+		for _, object := range listObjectsResult.CommonPrefixes {
+			name := path.Base(strings.Trim(*object.Prefix, "/"))
+			file := model.Object{
+				//Id:       *object.Key,
+				Name:     name,
+				Modified: d.Modified,
+				IsFolder: true,
+			}
+			files = append(files, &file)
+		}
+		for _, object := range listObjectsResult.Contents {
+			name := path.Base(*object.Key)
+			if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) {
+				continue
+			}
+			file := model.Object{
+				//Id:       *object.Key,
+				Name:     name,
+				Size:     *object.Size,
+				Modified: *object.LastModified,
+			}
+			files = append(files, &file)
+		}
+		if listObjectsResult.IsTruncated == nil {
+			return nil, errors.New("IsTruncated nil")
+		}
+		if *listObjectsResult.IsTruncated {
+			marker = *listObjectsResult.NextMarker
+		} else {
+			break
+		}
+	}
+	return files, nil
+}
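+
+// S3 has no real directories; getKey and the "/" delimiter emulate them.
+// A quick sketch of the mapping (the paths are illustrative):
+//
+//	getKey("/docs/a.txt", false) // "docs/a.txt" (object key)
+//	getKey("/docs", true)        // "docs/"      (list prefix)
+//	getKey("/", true)            // ""           (bucket root)
+//
+// Listing with Prefix="docs/" and Delimiter="/" then returns only direct
+// children: subfolders arrive as CommonPrefixes and files as Contents,
+// exactly as consumed by listV1 above and listV2 below.
+func (d *S3) 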
listV2(prefix string, args model.ListArgs) ([]model.Obj, error) { + prefix = getKey(prefix, true) + files := make([]model.Obj, 0) + var continuationToken, startAfter *string + for { + input := &s3.ListObjectsV2Input{ + Bucket: &d.Bucket, + ContinuationToken: continuationToken, + Prefix: &prefix, + Delimiter: aws.String("/"), + StartAfter: startAfter, + } + listObjectsResult, err := d.client.ListObjectsV2(input) + if err != nil { + return nil, err + } + log.Debugf("resp: %+v", listObjectsResult) + for _, object := range listObjectsResult.CommonPrefixes { + name := path.Base(strings.Trim(*object.Prefix, "/")) + file := model.Object{ + //Id: *object.Key, + Name: name, + Modified: d.Modified, + IsFolder: true, + } + files = append(files, &file) + } + for _, object := range listObjectsResult.Contents { + if strings.HasSuffix(*object.Key, "/") { + continue + } + name := path.Base(*object.Key) + if !args.S3ShowPlaceholder && (name == getPlaceholderName(d.Placeholder) || name == d.Placeholder) { + continue + } + file := model.Object{ + //Id: *object.Key, + Name: name, + Size: *object.Size, + Modified: *object.LastModified, + } + files = append(files, &file) + } + if !aws.BoolValue(listObjectsResult.IsTruncated) { + break + } + if listObjectsResult.NextContinuationToken != nil { + continuationToken = listObjectsResult.NextContinuationToken + continue + } + if len(listObjectsResult.Contents) == 0 { + break + } + startAfter = listObjectsResult.Contents[len(listObjectsResult.Contents)-1].Key + } + return files, nil +} + +func (d *S3) copy(ctx context.Context, src string, dst string, isDir bool) error { + if isDir { + return d.copyDir(ctx, src, dst) + } + return d.copyFile(ctx, src, dst) +} + +func (d *S3) copyFile(ctx context.Context, src string, dst string) error { + srcKey := getKey(src, false) + dstKey := getKey(dst, false) + input := &s3.CopyObjectInput{ + Bucket: &d.Bucket, + CopySource: aws.String("/" + d.Bucket + "/" + srcKey), + Key: &dstKey, + } + _, err := d.client.CopyObject(input) + return err +} + +func (d *S3) copyDir(ctx context.Context, src string, dst string) error { + objs, err := op.List(ctx, d, src, model.ListArgs{S3ShowPlaceholder: true}) + if err != nil { + return err + } + for _, obj := range objs { + cSrc := path.Join(src, obj.GetName()) + cDst := path.Join(dst, obj.GetName()) + if obj.IsDir() { + err = d.copyDir(ctx, cSrc, cDst) + } else { + err = d.copyFile(ctx, cSrc, cDst) + } + if err != nil { + return err + } + } + return nil +} + +func (d *S3) removeDir(ctx context.Context, src string) error { + objs, err := op.List(ctx, d, src, model.ListArgs{}) + if err != nil { + return err + } + for _, obj := range objs { + cSrc := path.Join(src, obj.GetName()) + if obj.IsDir() { + err = d.removeDir(ctx, cSrc) + } else { + err = d.removeFile(cSrc) + } + if err != nil { + return err + } + } + _ = d.removeFile(path.Join(src, getPlaceholderName(d.Placeholder))) + _ = d.removeFile(path.Join(src, d.Placeholder)) + return nil +} + +func (d *S3) removeFile(src string) error { + key := getKey(src, false) + input := &s3.DeleteObjectInput{ + Bucket: &d.Bucket, + Key: &key, + } + _, err := d.client.DeleteObject(input) + return err +} diff --git a/drivers/sftp/driver.go.txt b/drivers/sftp/driver.go.txt new file mode 100644 index 0000000..1f21659 --- /dev/null +++ b/drivers/sftp/driver.go.txt @@ -0,0 +1,118 @@ +package sftp + +import ( + "context" + "os" + "path" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/errs" + 
"github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + "github.com/pkg/sftp" + log "github.com/sirupsen/logrus" +) + +type SFTP struct { + model.Storage + Addition + client *sftp.Client + clientConnectionError error +} + +func (d *SFTP) Config() driver.Config { + return config +} + +func (d *SFTP) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *SFTP) Init(ctx context.Context) error { + return d.initClient() +} + +func (d *SFTP) Drop(ctx context.Context) error { + if d.client != nil { + _ = d.client.Close() + } + return nil +} + +func (d *SFTP) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + if err := d.clientReconnectOnConnectionError(); err != nil { + return nil, err + } + log.Debugf("[sftp] list dir: %s", dir.GetPath()) + files, err := d.client.ReadDir(dir.GetPath()) + if err != nil { + return nil, err + } + objs, err := utils.SliceConvert(files, func(src os.FileInfo) (model.Obj, error) { + return d.fileToObj(src, dir.GetPath()) + }) + return objs, err +} + +func (d *SFTP) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + if err := d.clientReconnectOnConnectionError(); err != nil { + return nil, err + } + remoteFile, err := d.client.Open(file.GetPath()) + if err != nil { + return nil, err + } + link := &model.Link{ + MFile: remoteFile, + } + return link, nil +} + +func (d *SFTP) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if err := d.clientReconnectOnConnectionError(); err != nil { + return err + } + return d.client.MkdirAll(path.Join(parentDir.GetPath(), dirName)) +} + +func (d *SFTP) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if err := d.clientReconnectOnConnectionError(); err != nil { + return err + } + return d.client.Rename(srcObj.GetPath(), path.Join(dstDir.GetPath(), srcObj.GetName())) +} + +func (d *SFTP) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + if err := d.clientReconnectOnConnectionError(); err != nil { + return err + } + return d.client.Rename(srcObj.GetPath(), path.Join(path.Dir(srcObj.GetPath()), newName)) +} + +func (d *SFTP) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return errs.NotSupport +} + +func (d *SFTP) Remove(ctx context.Context, obj model.Obj) error { + if err := d.clientReconnectOnConnectionError(); err != nil { + return err + } + return d.remove(obj.GetPath()) +} + +func (d *SFTP) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + if err := d.clientReconnectOnConnectionError(); err != nil { + return err + } + dstFile, err := d.client.Create(path.Join(dstDir.GetPath(), stream.GetName())) + if err != nil { + return err + } + defer func() { + _ = dstFile.Close() + }() + err = utils.CopyWithCtx(ctx, dstFile, stream, stream.GetSize(), up) + return err +} + +var _ driver.Driver = (*SFTP)(nil) diff --git a/drivers/sftp/meta.go.txt b/drivers/sftp/meta.go.txt new file mode 100644 index 0000000..9b16656 --- /dev/null +++ b/drivers/sftp/meta.go.txt @@ -0,0 +1,30 @@ +package sftp + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + Address string `json:"address" required:"true"` + Username string `json:"username" required:"true"` + PrivateKey string `json:"private_key" type:"text"` + Password string `json:"password"` + Passphrase string `json:"passphrase"` + driver.RootPath + IgnoreSymlinkError bool 
`json:"ignore_symlink_error" default:"false" info:"Ignore symlink error"`
+}
+
+var config = driver.Config{
+	Name:        "SFTP",
+	LocalSort:   true,
+	OnlyLocal:   true,
+	DefaultRoot: "/",
+	CheckStatus: true,
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &SFTP{}
+	})
+}
diff --git a/drivers/sftp/types.go.txt b/drivers/sftp/types.go.txt
new file mode 100644
index 0000000..493e884
--- /dev/null
+++ b/drivers/sftp/types.go.txt
@@ -0,0 +1,53 @@
+package sftp
+
+import (
+	"os"
+	stdpath "path"
+	"strings"
+
+	"github.com/alist-org/alist/v3/internal/model"
+	log "github.com/sirupsen/logrus"
+)
+
+func (d *SFTP) fileToObj(f os.FileInfo, dir string) (model.Obj, error) {
+	symlink := f.Mode()&os.ModeSymlink != 0
+	if !symlink {
+		return &model.Object{
+			Name:     f.Name(),
+			Size:     f.Size(),
+			Modified: f.ModTime(),
+			IsFolder: f.IsDir(),
+		}, nil
+	}
+	path := stdpath.Join(dir, f.Name())
+	// resolve the symlink target path
+	target, err := d.client.ReadLink(path)
+	if err != nil {
+		return nil, err
+	}
+	if !strings.HasPrefix(target, "/") {
+		target = stdpath.Join(dir, target)
+	}
+	_f, err := d.client.Stat(target)
+	if err != nil {
+		if d.IgnoreSymlinkError {
+			return &model.Object{
+				Name:     f.Name(),
+				Size:     f.Size(),
+				Modified: f.ModTime(),
+				IsFolder: f.IsDir(),
+			}, nil
+		}
+		return nil, err
+	}
+	// fill in basic info from the resolved target
+	obj := &model.Object{
+		Name:     f.Name(),
+		Size:     _f.Size(),
+		Modified: _f.ModTime(),
+		IsFolder: _f.IsDir(),
+		Path:     target,
+	}
+	log.Debugf("[sftp] obj: %+v, is symlink: %v", obj, symlink)
+	return obj, nil
+}
diff --git a/drivers/sftp/util.go.txt b/drivers/sftp/util.go.txt
new file mode 100644
index 0000000..53f9c37
--- /dev/null
+++ b/drivers/sftp/util.go.txt
@@ -0,0 +1,96 @@
+package sftp
+
+import (
+	"path"
+
+	"github.com/pkg/sftp"
+	log "github.com/sirupsen/logrus"
+	"golang.org/x/crypto/ssh"
+)
+
+// helpers that are not part of the Driver interface
+
+func (d *SFTP) initClient() error {
+	var auth ssh.AuthMethod
+	if len(d.PrivateKey) > 0 {
+		var err error
+		var signer ssh.Signer
+		if len(d.Passphrase) > 0 {
+			signer, err = ssh.ParsePrivateKeyWithPassphrase([]byte(d.PrivateKey), []byte(d.Passphrase))
+		} else {
+			signer, err = ssh.ParsePrivateKey([]byte(d.PrivateKey))
+		}
+		if err != nil {
+			return err
+		}
+		auth = ssh.PublicKeys(signer)
+	} else {
+		auth = ssh.Password(d.Password)
+	}
+	config := &ssh.ClientConfig{
+		User:            d.Username,
+		Auth:            []ssh.AuthMethod{auth},
+		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
+	}
+	conn, err := ssh.Dial("tcp", d.Address, config)
+	if err != nil {
+		return err
+	}
+	d.client, err = sftp.NewClient(conn)
+	if err == nil {
+		d.clientConnectionError = nil
+		go func(d *SFTP) {
+			d.clientConnectionError = d.client.Wait()
+		}(d)
+	}
+	return err
+}
+
+func (d *SFTP) clientReconnectOnConnectionError() error {
+	err := d.clientConnectionError
+	if err == nil {
+		return nil
+	}
+	log.Debugf("[sftp] discarding closed sftp connection: %v", err)
+	_ = d.client.Close()
+	err = d.initClient()
+	return err
+}
+
+func (d *SFTP) remove(remotePath string) error {
+	f, err := d.client.Stat(remotePath)
+	if err != nil {
+		return nil
+	}
+	if f.IsDir() {
+		return d.removeDirectory(remotePath)
+	} else {
+		return d.removeFile(remotePath)
+	}
+}
+
+func (d *SFTP) removeDirectory(remotePath string) error {
+	remoteFiles, err := d.client.ReadDir(remotePath)
+	if err != nil {
+		return err
+	}
+	for _, backupDir := range remoteFiles {
+		remoteFilePath := path.Join(remotePath, backupDir.Name())
+		if backupDir.IsDir() {
+			err := d.removeDirectory(remoteFilePath)
+ if err != nil { + return err + } + } else { + err := d.removeFile(remoteFilePath) + if err != nil { + return err + } + } + } + return d.client.RemoveDirectory(remotePath) +} + +func (d *SFTP) removeFile(remotePath string) error { + return d.client.Remove(path.Join(remotePath)) +} diff --git a/drivers/smb/driver.go.txt b/drivers/smb/driver.go.txt new file mode 100644 index 0000000..9632f24 --- /dev/null +++ b/drivers/smb/driver.go.txt @@ -0,0 +1,200 @@ +package smb + +import ( + "context" + "errors" + "path/filepath" + "strings" + + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/utils" + + "github.com/hirochachacha/go-smb2" +) + +type SMB struct { + lastConnTime int64 + model.Storage + Addition + fs *smb2.Share +} + +func (d *SMB) Config() driver.Config { + return config +} + +func (d *SMB) GetAddition() driver.Additional { + return &d.Addition +} + +func (d *SMB) Init(ctx context.Context) error { + if strings.Index(d.Addition.Address, ":") < 0 { + d.Addition.Address = d.Addition.Address + ":445" + } + return d.initFS() +} + +func (d *SMB) Drop(ctx context.Context) error { + if d.fs != nil { + _ = d.fs.Umount() + } + return nil +} + +func (d *SMB) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) { + if err := d.checkConn(); err != nil { + return nil, err + } + fullPath := dir.GetPath() + rawFiles, err := d.fs.ReadDir(fullPath) + if err != nil { + d.cleanLastConnTime() + return nil, err + } + d.updateLastConnTime() + var files []model.Obj + for _, f := range rawFiles { + file := model.ObjThumb{ + Object: model.Object{ + Name: f.Name(), + Modified: f.ModTime(), + Size: f.Size(), + IsFolder: f.IsDir(), + Ctime: f.(*smb2.FileStat).CreationTime, + }, + } + files = append(files, &file) + } + return files, nil +} + +func (d *SMB) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) { + if err := d.checkConn(); err != nil { + return nil, err + } + fullPath := file.GetPath() + remoteFile, err := d.fs.Open(fullPath) + if err != nil { + d.cleanLastConnTime() + return nil, err + } + link := &model.Link{ + MFile: remoteFile, + } + d.updateLastConnTime() + return link, nil +} + +func (d *SMB) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error { + if err := d.checkConn(); err != nil { + return err + } + fullPath := filepath.Join(parentDir.GetPath(), dirName) + err := d.fs.MkdirAll(fullPath, 0700) + if err != nil { + d.cleanLastConnTime() + return err + } + d.updateLastConnTime() + return nil +} + +func (d *SMB) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + if err := d.checkConn(); err != nil { + return err + } + srcPath := srcObj.GetPath() + dstPath := filepath.Join(dstDir.GetPath(), srcObj.GetName()) + err := d.fs.Rename(srcPath, dstPath) + if err != nil { + d.cleanLastConnTime() + return err + } + d.updateLastConnTime() + return nil +} + +func (d *SMB) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + if err := d.checkConn(); err != nil { + return err + } + srcPath := srcObj.GetPath() + dstPath := filepath.Join(filepath.Dir(srcPath), newName) + err := d.fs.Rename(srcPath, dstPath) + if err != nil { + d.cleanLastConnTime() + return err + } + d.updateLastConnTime() + return nil +} + +func (d *SMB) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + if err := d.checkConn(); err != nil { + return err + } + srcPath := srcObj.GetPath() + dstPath := filepath.Join(dstDir.GetPath(), 
srcObj.GetName())
+	var err error
+	if srcObj.IsDir() {
+		err = d.CopyDir(srcPath, dstPath)
+	} else {
+		err = d.CopyFile(srcPath, dstPath)
+	}
+	if err != nil {
+		d.cleanLastConnTime()
+		return err
+	}
+	d.updateLastConnTime()
+	return nil
+}
+
+func (d *SMB) Remove(ctx context.Context, obj model.Obj) error {
+	if err := d.checkConn(); err != nil {
+		return err
+	}
+	var err error
+	fullPath := obj.GetPath()
+	if obj.IsDir() {
+		err = d.fs.RemoveAll(fullPath)
+	} else {
+		err = d.fs.Remove(fullPath)
+	}
+	if err != nil {
+		d.cleanLastConnTime()
+		return err
+	}
+	d.updateLastConnTime()
+	return nil
+}
+
+func (d *SMB) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
+	if err := d.checkConn(); err != nil {
+		return err
+	}
+	fullPath := filepath.Join(dstDir.GetPath(), stream.GetName())
+	out, err := d.fs.Create(fullPath)
+	if err != nil {
+		d.cleanLastConnTime()
+		return err
+	}
+	d.updateLastConnTime()
+	defer func() {
+		_ = out.Close()
+		if errors.Is(err, context.Canceled) {
+			_ = d.fs.Remove(fullPath)
+		}
+	}()
+	err = utils.CopyWithCtx(ctx, out, stream, stream.GetSize(), up)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+//func (d *SMB) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) {
+//	return nil, errs.NotSupport
+//}
+
+var _ driver.Driver = (*SMB)(nil)
diff --git a/drivers/smb/meta.go.txt b/drivers/smb/meta.go.txt
new file mode 100644
index 0000000..3386db2
--- /dev/null
+++ b/drivers/smb/meta.go.txt
@@ -0,0 +1,28 @@
+package smb
+
+import (
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/op"
+)
+
+type Addition struct {
+	driver.RootPath
+	Address   string `json:"address" required:"true"`
+	Username  string `json:"username" required:"true"`
+	Password  string `json:"password"`
+	ShareName string `json:"share_name" required:"true"`
+}
+
+var config = driver.Config{
+	Name:        "SMB",
+	LocalSort:   true,
+	OnlyLocal:   true,
+	DefaultRoot: ".",
+	NoCache:     true,
+}
+
+func init() {
+	op.RegisterDriver(func() driver.Driver {
+		return &SMB{}
+	})
+}
diff --git a/drivers/smb/types.go.txt b/drivers/smb/types.go.txt
new file mode 100644
index 0000000..161798a
--- /dev/null
+++ b/drivers/smb/types.go.txt
@@ -0,0 +1 @@
+package smb
diff --git a/drivers/smb/util.go.txt b/drivers/smb/util.go.txt
new file mode 100644
index 0000000..d9fbf6c
--- /dev/null
+++ b/drivers/smb/util.go.txt
@@ -0,0 +1,138 @@
+package smb
+
+import (
+	"github.com/alist-org/alist/v3/pkg/utils"
+	"io/fs"
+	"net"
+	"os"
+	"path/filepath"
+	"sync/atomic"
+	"time"
+
+	"github.com/hirochachacha/go-smb2"
+)
+
+func (d *SMB) updateLastConnTime() {
+	atomic.StoreInt64(&d.lastConnTime, time.Now().Unix())
+}
+
+func (d *SMB) cleanLastConnTime() {
+	atomic.StoreInt64(&d.lastConnTime, 0)
+}
+
+func (d *SMB) getLastConnTime() time.Time {
+	return time.Unix(atomic.LoadInt64(&d.lastConnTime), 0)
+}
+
+func (d *SMB) initFS() error {
+	conn, err := net.Dial("tcp", d.Address)
+	if err != nil {
+		return err
+	}
+	dialer := &smb2.Dialer{
+		Initiator: &smb2.NTLMInitiator{
+			User:     d.Username,
+			Password: d.Password,
+		},
+	}
+	s, err := dialer.Dial(conn)
+	if err != nil {
+		return err
+	}
+	d.fs, err = s.Mount(d.ShareName)
+	if err != nil {
+		return err
+	}
+	d.updateLastConnTime()
+	return err
+}
+
+func (d *SMB) checkConn() error {
+	if time.Since(d.getLastConnTime()) < 5*time.Minute {
+		return nil
+	}
+	if d.fs != nil {
+		_ = d.fs.Umount()
+	}
+	return d.initFS()
+}
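+
+// For reference, a minimal standalone go-smb2 session using the same calls
+// initFS makes above (the host, share and credentials are placeholders, and
+// error handling is elided):
+//
+//	conn, _ := net.Dial("tcp", "fileserver:445")
+//	dialer := &smb2.Dialer{Initiator: &smb2.NTLMInitiator{User: "user", Password: "pass"}}
+//	session, _ := dialer.Dial(conn)     // negotiate and authenticate
+//	share, _ := session.Mount("public") // tree connect to the share
+//	defer share.Umount()
+//	fi, _ := share.Stat(".")            // use the share like a file system
+//	_ = fi
+//
+// CopyFile copies a single file from src to 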
dst
+func (d *SMB) CopyFile(src, dst string) error {
+	var err error
+	var srcfd *smb2.File
+	var dstfd *smb2.File
+	var srcinfo fs.FileInfo
+
+	if srcfd, err = d.fs.Open(src); err != nil {
+		return err
+	}
+	defer srcfd.Close()
+
+	if dstfd, err = d.CreateNestedFile(dst); err != nil {
+		return err
+	}
+	defer dstfd.Close()
+
+	if _, err = utils.CopyWithBuffer(dstfd, srcfd); err != nil {
+		return err
+	}
+	if srcinfo, err = d.fs.Stat(src); err != nil {
+		return err
+	}
+	return d.fs.Chmod(dst, srcinfo.Mode())
+}
+
+// CopyDir copies a whole directory recursively
+func (d *SMB) CopyDir(src string, dst string) error {
+	var err error
+	var fds []fs.FileInfo
+	var srcinfo fs.FileInfo
+
+	if srcinfo, err = d.fs.Stat(src); err != nil {
+		return err
+	}
+	if err = d.fs.MkdirAll(dst, srcinfo.Mode()); err != nil {
+		return err
+	}
+	if fds, err = d.fs.ReadDir(src); err != nil {
+		return err
+	}
+	for _, fd := range fds {
+		srcfp := filepath.Join(src, fd.Name())
+		dstfp := filepath.Join(dst, fd.Name())
+
+		if fd.IsDir() {
+			if err = d.CopyDir(srcfp, dstfp); err != nil {
+				return err
+			}
+		} else {
+			if err = d.CopyFile(srcfp, dstfp); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Exists reports whether the file exists
+func (d *SMB) Exists(name string) bool {
+	if _, err := d.fs.Stat(name); err != nil {
+		if os.IsNotExist(err) {
+			return false
+		}
+	}
+	return true
+}
+
+// CreateNestedFile creates a file, making any missing parent directories
+func (d *SMB) CreateNestedFile(path string) (*smb2.File, error) {
+	basePath := filepath.Dir(path)
+	if !d.Exists(basePath) {
+		err := d.fs.MkdirAll(basePath, 0700)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return d.fs.Create(path)
+}
diff --git a/drivers/webdav/driver.go.txt b/drivers/webdav/driver.go.txt
new file mode 100644
index 0000000..b402b1d
--- /dev/null
+++ b/drivers/webdav/driver.go.txt
@@ -0,0 +1,106 @@
+package webdav
+
+import (
+	"context"
+	"net/http"
+	"os"
+	"path"
+	"time"
+
+	"github.com/alist-org/alist/v3/internal/driver"
+	"github.com/alist-org/alist/v3/internal/model"
+	"github.com/alist-org/alist/v3/pkg/cron"
+	"github.com/alist-org/alist/v3/pkg/gowebdav"
+	"github.com/alist-org/alist/v3/pkg/utils"
+)
+
+type WebDav struct {
+	model.Storage
+	Addition
+	client *gowebdav.Client
+	cron   *cron.Cron
+}
+
+func (d *WebDav) Config() driver.Config {
+	return config
+}
+
+func (d *WebDav) GetAddition() driver.Additional {
+	return &d.Addition
+}
+
+func (d *WebDav) Init(ctx context.Context) error {
+	err := d.setClient()
+	if err == nil {
+		d.cron = cron.NewCron(time.Hour * 12)
+		d.cron.Do(func() {
+			_ = d.setClient()
+		})
+	}
+	return err
+}
+
+func (d *WebDav) Drop(ctx context.Context) error {
+	if d.cron != nil {
+		d.cron.Stop()
+	}
+	return nil
+}
+
+func (d *WebDav) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+	files, err := d.client.ReadDir(dir.GetPath())
+	if err != nil {
+		return nil, err
+	}
+	return utils.SliceConvert(files, func(src os.FileInfo) (model.Obj, error) {
+		return &model.Object{
+			Name:     src.Name(),
+			Size:     src.Size(),
+			Modified: src.ModTime(),
+			IsFolder: src.IsDir(),
+		}, nil
+	})
+}
+
+func (d *WebDav) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+	url, header, err := d.client.Link(file.GetPath())
+	if err != nil {
+		return nil, err
+	}
+	return &model.Link{
+		URL:    url,
+		Header: header,
+	}, nil
+}
+
+func (d *WebDav) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+	return d.client.MkdirAll(path.Join(parentDir.GetPath(), 
dirName), 0644) +} + +func (d *WebDav) Move(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.client.Rename(getPath(srcObj), path.Join(dstDir.GetPath(), srcObj.GetName()), true) +} + +func (d *WebDav) Rename(ctx context.Context, srcObj model.Obj, newName string) error { + return d.client.Rename(getPath(srcObj), path.Join(path.Dir(srcObj.GetPath()), newName), true) +} + +func (d *WebDav) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { + return d.client.Copy(getPath(srcObj), path.Join(dstDir.GetPath(), srcObj.GetName()), true) +} + +func (d *WebDav) Remove(ctx context.Context, obj model.Obj) error { + return d.client.RemoveAll(getPath(obj)) +} + +func (d *WebDav) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error { + callback := func(r *http.Request) { + r.Header.Set("Content-Type", stream.GetMimetype()) + r.ContentLength = stream.GetSize() + } + // TODO: support cancel + err := d.client.WriteStream(path.Join(dstDir.GetPath(), stream.GetName()), stream, 0644, callback) + return err +} + +var _ driver.Driver = (*WebDav)(nil) diff --git a/drivers/webdav/meta.go.txt b/drivers/webdav/meta.go.txt new file mode 100644 index 0000000..2294d48 --- /dev/null +++ b/drivers/webdav/meta.go.txt @@ -0,0 +1,28 @@ +package webdav + +import ( + "github.com/alist-org/alist/v3/internal/driver" + "github.com/alist-org/alist/v3/internal/op" +) + +type Addition struct { + Vendor string `json:"vendor" type:"select" options:"sharepoint,other" default:"other"` + Address string `json:"address" required:"true"` + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` + driver.RootPath + TlsInsecureSkipVerify bool `json:"tls_insecure_skip_verify" default:"false"` +} + +var config = driver.Config{ + Name: "WebDav", + LocalSort: true, + OnlyProxy: true, + DefaultRoot: "/", +} + +func init() { + op.RegisterDriver(func() driver.Driver { + return &WebDav{} + }) +} diff --git a/drivers/webdav/odrvcookie/cookie.go.txt b/drivers/webdav/odrvcookie/cookie.go.txt new file mode 100644 index 0000000..bcd4f3b --- /dev/null +++ b/drivers/webdav/odrvcookie/cookie.go.txt @@ -0,0 +1,46 @@ +package odrvcookie + +import ( + "net/http" + + "github.com/alist-org/alist/v3/pkg/cookie" +) + +//type SpCookie struct { +// Cookie string +// expire time.Time +//} +// +//func (sp SpCookie) IsExpire() bool { +// return time.Now().After(sp.expire) +//} +// +//var cookiesMap = struct { +// sync.Mutex +// m map[string]*SpCookie +//}{m: make(map[string]*SpCookie)} + +func GetCookie(username, password, siteUrl string) (string, error) { + //cookiesMap.Lock() + //defer cookiesMap.Unlock() + //spCookie, ok := cookiesMap.m[username] + //if ok { + // if !spCookie.IsExpire() { + // log.Debugln("sp use old cookie.") + // return spCookie.Cookie, nil + // } + //} + //log.Debugln("fetch new cookie") + ca := New(username, password, siteUrl) + tokenConf, err := ca.Cookies() + if err != nil { + return "", err + } + return cookie.ToString([]*http.Cookie{&tokenConf.RtFa, &tokenConf.FedAuth}), nil + //spCookie = &SpCookie{ + // Cookie: cookie.ToString([]*http.Cookie{&tokenConf.RtFa, &tokenConf.FedAuth}), + // expire: time.Now().Add(time.Hour * 12), + //} + //cookiesMap.m[username] = spCookie + //return spCookie.Cookie, nil +} diff --git a/drivers/webdav/odrvcookie/fetch.go.txt b/drivers/webdav/odrvcookie/fetch.go.txt new file mode 100644 index 0000000..a52fc68 --- /dev/null +++ b/drivers/webdav/odrvcookie/fetch.go.txt @@ -0,0 +1,207 @@ +// Package 
odrvcookie can fetch authentication cookies for a sharepoint webdav endpoint
+package odrvcookie
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"html/template"
+	"net/http"
+	"net/http/cookiejar"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/alist-org/alist/v3/drivers/base"
+	"golang.org/x/net/publicsuffix"
+)
+
+// CookieAuth holds the authentication information:
+// the username and password as well as the authentication endpoint
+type CookieAuth struct {
+	user     string
+	pass     string
+	endpoint string
+}
+
+// CookieResponse contains the requested cookies
+type CookieResponse struct {
+	RtFa    http.Cookie
+	FedAuth http.Cookie
+}
+
+// SuccessResponse holds a response from the sharepoint webdav
+type SuccessResponse struct {
+	XMLName xml.Name            `xml:"Envelope"`
+	Succ    SuccessResponseBody `xml:"Body"`
+}
+
+// SuccessResponseBody is the body of a success response, it holds the token
+type SuccessResponseBody struct {
+	XMLName xml.Name
+	Type    string    `xml:"RequestSecurityTokenResponse>TokenType"`
+	Created time.Time `xml:"RequestSecurityTokenResponse>Lifetime>Created"`
+	Expires time.Time `xml:"RequestSecurityTokenResponse>Lifetime>Expires"`
+	Token   string    `xml:"RequestSecurityTokenResponse>RequestedSecurityToken>BinarySecurityToken"`
+}
+
+// reqString is a template that gets populated with the user data in order to retrieve a "BinarySecurityToken"
+const reqString = `<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope"
+xmlns:a="http://www.w3.org/2005/08/addressing"
+xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd">
+<s:Header>
+<a:Action s:mustUnderstand="1">http://schemas.xmlsoap.org/ws/2005/02/trust/RST/Issue</a:Action>
+<a:ReplyTo>
+<a:Address>http://www.w3.org/2005/08/addressing/anonymous</a:Address>
+</a:ReplyTo>
+<a:To s:mustUnderstand="1">{{ .LoginUrl }}</a:To>
+<o:Security s:mustUnderstand="1"
+xmlns:o="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd">
+<o:UsernameToken>
+<o:Username>{{ .Username }}</o:Username>
+<o:Password>{{ .Password }}</o:Password>
+</o:UsernameToken>
+</o:Security>
+</s:Header>
+<s:Body>
+<t:RequestSecurityToken xmlns:t="http://schemas.xmlsoap.org/ws/2005/02/trust">
+<wsp:AppliesTo xmlns:wsp="http://schemas.xmlsoap.org/ws/2004/09/policy">
+<a:EndpointReference>
+<a:Address>{{ .Address }}</a:Address>
+</a:EndpointReference>
+</wsp:AppliesTo>
+<t:KeyType>http://schemas.xmlsoap.org/ws/2005/05/identity/NoProofKey</t:KeyType>
+<t:RequestType>http://schemas.xmlsoap.org/ws/2005/02/trust/Issue</t:RequestType>
+<t:TokenType>urn:oasis:names:tc:SAML:1.0:assertion</t:TokenType>
+</t:RequestSecurityToken>
+</s:Body>
+</s:Envelope>`
+
+// New creates a new CookieAuth struct
+func New(pUser, pPass, pEndpoint string) CookieAuth {
+	retStruct := CookieAuth{
+		user:     pUser,
+		pass:     pPass,
+		endpoint: pEndpoint,
+	}
+
+	return retStruct
+}
+
+// Cookies creates a CookieResponse. 
It fetches the auth token and then +// retrieves the Cookies +func (ca *CookieAuth) Cookies() (CookieResponse, error) { + spToken, err := ca.getSPToken() + if err != nil { + return CookieResponse{}, err + } + return ca.getSPCookie(spToken) +} + +func (ca *CookieAuth) getSPCookie(conf *SuccessResponse) (CookieResponse, error) { + spRoot, err := url.Parse(ca.endpoint) + if err != nil { + return CookieResponse{}, err + } + + u, err := url.Parse("https://" + spRoot.Host + "/_forms/default.aspx?wa=wsignin1.0") + if err != nil { + return CookieResponse{}, err + } + + // To authenticate with davfs or anything else we need two cookies (rtFa and FedAuth) + // In order to get them we use the token we got earlier and a cookieJar + jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) + if err != nil { + return CookieResponse{}, err + } + + client := &http.Client{ + Jar: jar, + } + + // Send the previously acquired Token as a Post parameter + if _, err = client.Post(u.String(), "text/xml", strings.NewReader(conf.Succ.Token)); err != nil { + return CookieResponse{}, err + } + + cookieResponse := CookieResponse{} + for _, cookie := range jar.Cookies(u) { + if (cookie.Name == "rtFa") || (cookie.Name == "FedAuth") { + switch cookie.Name { + case "rtFa": + cookieResponse.RtFa = *cookie + case "FedAuth": + cookieResponse.FedAuth = *cookie + } + } + } + return cookieResponse, err +} + +var loginUrlsMap = map[string]string{ + "com": "https://login.microsoftonline.com", + "cn": "https://login.chinacloudapi.cn", + "us": "https://login.microsoftonline.us", + "de": "https://login.microsoftonline.de", +} + +func getLoginUrl(endpoint string) (string, error) { + spRoot, err := url.Parse(endpoint) + if err != nil { + return "", err + } + domains := strings.Split(spRoot.Host, ".") + tld := domains[len(domains)-1] + loginUrl, ok := loginUrlsMap[tld] + if !ok { + return "", fmt.Errorf("tld %s is not supported", tld) + } + return loginUrl + "/extSTS.srf", nil +} + +func (ca *CookieAuth) getSPToken() (*SuccessResponse, error) { + loginUrl, err := getLoginUrl(ca.endpoint) + if err != nil { + return nil, err + } + reqData := map[string]string{ + "Username": ca.user, + "Password": ca.pass, + "Address": ca.endpoint, + "LoginUrl": loginUrl, + } + + t := template.Must(template.New("authXML").Parse(reqString)) + + buf := &bytes.Buffer{} + if err := t.Execute(buf, reqData); err != nil { + return nil, err + } + + // Execute the first request which gives us an auth token for the sharepoint service + // With this token we can authenticate on the login page and save the returned cookies + req, err := http.NewRequest("POST", loginUrl, buf) + if err != nil { + return nil, err + } + + client := base.HttpClient + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + respBuf := bytes.Buffer{} + respBuf.ReadFrom(resp.Body) + s := respBuf.Bytes() + + var conf SuccessResponse + err = xml.Unmarshal(s, &conf) + if err != nil { + return nil, err + } + + return &conf, err +} diff --git a/drivers/webdav/types.go.txt b/drivers/webdav/types.go.txt new file mode 100644 index 0000000..0541cc2 --- /dev/null +++ b/drivers/webdav/types.go.txt @@ -0,0 +1 @@ +package webdav diff --git a/drivers/webdav/util.go.txt b/drivers/webdav/util.go.txt new file mode 100644 index 0000000..23dc909 --- /dev/null +++ b/drivers/webdav/util.go.txt @@ -0,0 +1,52 @@ +package webdav + +import ( + "crypto/tls" + "net/http" + "net/http/cookiejar" + + 
"github.com/alist-org/alist/v3/drivers/webdav/odrvcookie" + "github.com/alist-org/alist/v3/internal/model" + "github.com/alist-org/alist/v3/pkg/gowebdav" +) + +// do others that not defined in Driver interface + +func (d *WebDav) isSharepoint() bool { + return d.Vendor == "sharepoint" +} + +func (d *WebDav) setClient() error { + c := gowebdav.NewClient(d.Address, d.Username, d.Password) + c.SetTransport(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: &tls.Config{InsecureSkipVerify: d.TlsInsecureSkipVerify}, + }) + if d.isSharepoint() { + cookie, err := odrvcookie.GetCookie(d.Username, d.Password, d.Address) + if err == nil { + c.SetInterceptor(func(method string, rq *http.Request) { + rq.Header.Del("Authorization") + rq.Header.Set("Cookie", cookie) + }) + } else { + return err + } + } else { + cookieJar, err := cookiejar.New(nil) + if err == nil { + c.SetJar(cookieJar) + } else { + return err + } + } + d.client = c + return nil +} + +func getPath(obj model.Obj) string { + if obj.IsDir() { + return obj.GetPath() + "/" + } + return obj.GetPath() +}