mirror of https://github.com/IceWhaleTech/CasaOS.git
synced 2025-12-23 04:54:41 +00:00

Compare commits: v0.4.2-alp ... v0.3.7-alp

2 commits: a96f6189ac, e164a5d4c6
14  .github/ISSUE_TEMPLATE/bug_report.md (vendored)

@@ -28,19 +28,5 @@ If applicable, add screenshots to help explain your problem.
 - Browser [e.g. chrome, safari]
 - Version [e.g. 22]
 
-**Logs**
-
-run following command to collect corresponding logs:
-
-```bash
-sudo journalctl -xef -u casaos-gateway
-sudo journalctl -xef -u casaos-user-service
-sudo journalctl -xef -u casaos-local-storage
-sudo journalctl -xef -u casaos.service
-```
-
-
-
-
 **Additional context**
 Add any other context about the problem here.
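The removed template collects service logs with `journalctl`. For anyone still gathering these by hand, a minimal sketch of saving the same logs to files so they can be attached to an issue (unit names are taken from the template above; `--no-pager` just disables interactive paging):

```bash
# Capture recent logs from each CasaOS service into a separate file.
for unit in casaos-gateway casaos-user-service casaos-local-storage casaos; do
    sudo journalctl -u "${unit}" --no-pager > "${unit}.log"
done
```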
23  .github/ISSUE_TEMPLATE/feedback.yml (vendored)

@@ -1,23 +0,0 @@
name: "Feedback"
description: Feedback, showcases, thoughts, needs and questions, etc.
title: "[Feedback] "
labels: ["feedback"]
body:
  - type: markdown
    attributes:
      value: |
        ### ❤️ Thanks for your feedback!
        > Come join our [Discord community](https://discord.gg/knqAbbBbeX) and paint the ideal home cloud with us.
  - type: textarea
    id: description
    attributes:
      label: Description
      placeholder: What do you want to tell us?
    validations:
      required: true
  - type: textarea
    id: additional
    attributes:
      label: Additional Information
      description: Please add logs/files/screenshots if you have them to help us better understanding.
22  .github/workflows/codecov.yml (vendored)

@@ -1,22 +0,0 @@
name: Collect Code Coverage

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: Run coverage
        run: go test -race -failfast -coverprofile=coverage.txt -covermode=atomic -v ./...
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
2  .github/workflows/demo.yml (vendored)

@@ -33,7 +33,7 @@ jobs:
 
       - name: Get old instance and snapshot name, create new instance name
        run: |
-          echo "OLD_INSTANCE_SNAPSHOT_NAME=$(aws lightsail get-instance-snapshots | grep '"name": "casaos-0.3.6-1666150291' | sed 's/ //g' | sed 's/"//g' | sed 's/,//g' | sed 's/name://g')" >> $GITHUB_ENV
+          echo "OLD_INSTANCE_SNAPSHOT_NAME=$(aws lightsail get-instance-snapshots | grep '"name": "0.3.3-demo-1658402149' | sed 's/ //g' | sed 's/"//g' | sed 's/,//g' | sed 's/name://g')" >> $GITHUB_ENV
           echo "OLD_INSTANCE_NAME=$(aws lightsail get-instances | grep '"name": "CasaOS-Demo-[0-9]' | sed 's/ //g' | sed 's/"//g' | sed 's/,//g' | sed 's/name://g')" >> $GITHUB_ENV
           echo "NEW_INSTANCE_NAME=CasaOS-Demo-$(date +%s)" >> $GITHUB_ENV
 
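The workflow above pulls instance and snapshot names out of the AWS Lightsail JSON with a grep/sed chain. A hypothetical jq-based equivalent for the instance-name lookup, shown only for illustration — jq is not used by the workflow itself, and the field layout is assumed from the standard `aws lightsail get-instances` output:

```bash
# Same idea as the grep/sed pipeline above, expressed with jq.
aws lightsail get-instances | jq -r '.instances[].name' | grep '^CasaOS-Demo-[0-9]'
```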
58  .github/workflows/push_test_server.yml (vendored)

@@ -1,58 +0,0 @@
name: Auto Publish Website
on:
  push:
    branches:
      - main
permissions:
  contents: write
jobs:
  goreleaser:
    runs-on: ubuntu-22.04
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0
      - name: isntall git
        run: sudo apt install --yes git
      - name: git global
        run: sudo git config --global --add safe.directory '*'
      -
        name: Fetch all tags
        run: sudo git fetch --force --tags
      - name: Get version
        id: get_version
        # run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
        run: echo "VERSION=$(git describe --abbrev=0 --tags | awk -F- '{print $1}')" >> $GITHUB_ENV
      - name: show version
        id: show_version
        # run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
        run: echo ${{env.VERSION}}
      -
        name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19
      -
        name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          # either 'goreleaser' (default) or 'goreleaser-pro'
          distribution: goreleaser
          version: latest
          args: release --rm-dist
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Your GoReleaser Pro key, if you are using the 'goreleaser-pro' distribution
          # GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
      - name: install sshpass
        run: sudo apt install sshpass --yes
      - name: copy tar to target host
        shell: bash
        run: |
          sshpass -p "${{ secrets.ssh_password }}" scp -r -o StrictHostKeyChecking=no -P ${{ secrets.ssh_port }} ./dist/*.gz root@${{ secrets.ssh_ip }}:/var/www/download
          echo "ping success"
      - name: send message
        run: |
          curl -X POST -H "Content-Type: application/json" -d '{"msg_type":"text","content":{"text":"CasaOS updated"}}' https://open.feishu.cn/open-apis/bot/v2/hook/eb8f45c7-9636-4b64-84f2-a66d9aeb9d30
26  .github/workflows/release.yml (vendored)

@@ -17,7 +17,7 @@ jobs:
         run: |
           sudo apt update
           sudo apt-get --no-install-recommends --yes install \
-            upx libc6-dev-amd64-cross \
+            libc6-dev-amd64-cross \
             gcc-aarch64-linux-gnu libc6-dev-arm64-cross \
             gcc-arm-linux-gnueabihf libc6-dev-armhf-cross
       -
@@ -28,10 +28,6 @@ jobs:
       -
         name: Fetch all tags
         run: git fetch --force --tags
-
-      - name: Get version
-        id: get_version
-        run: echo ::set-output name=VERSION::${GITHUB_REF/refs\/tags\//}
       -
         name: Set up Go
         uses: actions/setup-go@v2
@@ -49,23 +45,3 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           # Your GoReleaser Pro key, if you are using the 'goreleaser-pro' distribution
           # GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }}
-
-      - name: Upload to oss
-        id: upload_to_oss
-        uses: tvrcgo/upload-to-oss@master
-        with:
-          key-id: ${{ secrets.OSS_KEY_ID }}
-          key-secret: ${{ secrets.OSS_KEY_SECRET }}
-          region: oss-cn-shanghai
-          bucket: casaos
-          assets: |
-            dist/checksums.txt:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/checksums.txt
-            dist/linux-arm-7-casaos-${{ steps.get_version.outputs.VERSION }}.tar.gz:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/linux-arm-7-casaos-${{ steps.get_version.outputs.VERSION }}.tar.gz
-            dist/linux-arm64-casaos-${{ steps.get_version.outputs.VERSION }}.tar.gz:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/linux-arm64-casaos-${{ steps.get_version.outputs.VERSION }}.tar.gz
-            dist/linux-amd64-casaos-${{ steps.get_version.outputs.VERSION }}.tar.gz:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/linux-amd64-casaos-${{ steps.get_version.outputs.VERSION }}.tar.gz
-            dist/linux-arm-7-casaos-migration-tool-${{ steps.get_version.outputs.VERSION }}.tar.gz:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/linux-arm-7-casaos-migration-tool-${{ steps.get_version.outputs.VERSION }}.tar.gz
-            dist/linux-arm64-casaos-migration-tool-${{ steps.get_version.outputs.VERSION }}.tar.gz:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/linux-arm64-casaos-migration-tool-${{ steps.get_version.outputs.VERSION }}.tar.gz
-            dist/linux-amd64-casaos-migration-tool-${{ steps.get_version.outputs.VERSION }}.tar.gz:/IceWhaleTech/CasaOS/releases/download/${{ steps.get_version.outputs.VERSION }}/linux-amd64-casaos-migration-tool-${{ steps.get_version.outputs.VERSION }}.tar.gz
-
-
-
5  .gitignore (vendored)

@@ -36,7 +36,4 @@ main
 github.com
 .all-contributorsrc
 dist
 CasaOS
-
-# System Files
-.DS_Store
@@ -3,15 +3,13 @@
 project_name: casaos
 before:
   hooks:
-    - go generate
-    - go run github.com/google/go-licenses@latest check . --disallowed_types=restricted
+    # You may remove this if you don't use go modules.
     - go mod tidy
-    - go test -race -v ./...
 
 builds:
   - id: casaos-amd64
     binary: build/sysroot/usr/bin/casaos
     env:
+      - CGO_ENABLED=1
       - CC=x86_64-linux-gnu-gcc
     gcflags:
       - all=-N -l
@@ -20,14 +18,17 @@ builds:
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - amd64
+    hooks:
+      post:
+        - find build/sysroot -type f | xargs -L 1 realpath --relative-to=build/sysroot > build/sysroot.manifest
   - id: casaos-arm64
     binary: build/sysroot/usr/bin/casaos
     env:
+      - CGO_ENABLED=1
       - CC=aarch64-linux-gnu-gcc
     gcflags:
       - all=-N -l
@@ -36,14 +37,17 @@ builds:
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - arm64
+    hooks:
+      post:
+        - find build/sysroot -type f | xargs -L 1 realpath --relative-to=build/sysroot > build/sysroot.manifest
   - id: casaos-arm-7
     binary: build/sysroot/usr/bin/casaos
     env:
+      - CGO_ENABLED=1
      - CC=arm-linux-gnueabihf-gcc
     gcflags:
       - all=-N -l
@@ -52,17 +56,20 @@ builds:
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - arm
     goarm:
       - "7"
+    hooks:
+      post:
+        - find build/sysroot -type f | xargs -L 1 realpath --relative-to=build/sysroot > build/sysroot.manifest
   - id: casaos-migration-tool-amd64
     binary: build/sysroot/usr/bin/casaos-migration-tool
     main: ./cmd/migration-tool
     env:
+      - CGO_ENABLED=1
       - CC=x86_64-linux-gnu-gcc
     gcflags:
       - all=-N -l
@@ -71,7 +78,6 @@ builds:
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
@@ -80,6 +86,7 @@ builds:
     binary: build/sysroot/usr/bin/casaos-migration-tool
     main: ./cmd/migration-tool
     env:
+      - CGO_ENABLED=1
       - CC=aarch64-linux-gnu-gcc
     gcflags:
       - all=-N -l
@@ -88,7 +95,6 @@ builds:
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
@@ -97,6 +103,7 @@ builds:
     binary: build/sysroot/usr/bin/casaos-migration-tool
     main: ./cmd/migration-tool
     env:
+      - CGO_ENABLED=1
       - CC=arm-linux-gnueabihf-gcc
     gcflags:
       - all=-N -l
@@ -105,7 +112,6 @@ builds:
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
@@ -3,139 +3,115 @@
 project_name: casaos
 before:
   hooks:
-    - go generate
-    - go run github.com/google/go-licenses@latest check . --disallowed_types=restricted
+    # You may remove this if you don't use go modules.
     - go mod tidy
-    - go test -race -v ./...
 
 builds:
   - id: casaos-amd64
     binary: build/sysroot/usr/bin/casaos
-    hooks:
-      post:
-        - upx --best --lzma -v --no-progress "{{ .Path }}"
     env:
+      - CGO_ENABLED=1
       - CC=x86_64-linux-gnu-gcc
     ldflags:
-      - -X main.commit={{.Commit}}
-      - -X main.date={{.Date}}
       - -s
       - -w
       - -extldflags "-static"
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - amd64
-  - id: casaos-arm64
-    binary: build/sysroot/usr/bin/casaos
     hooks:
       post:
-        - upx --best --lzma -v --no-progress "{{ .Path }}"
+        - find build/sysroot -type f | xargs -L 1 realpath --relative-to=build/sysroot > build/sysroot.manifest
+  - id: casaos-arm64
+    binary: build/sysroot/usr/bin/casaos
     env:
+      - CGO_ENABLED=1
       - CC=aarch64-linux-gnu-gcc
     ldflags:
-      - -X main.commit={{.Commit}}
-      - -X main.date={{.Date}}
       - -s
       - -w
       - -extldflags "-static"
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - arm64
-  - id: casaos-arm-7
-    binary: build/sysroot/usr/bin/casaos
     hooks:
       post:
-        - upx --best --lzma -v --no-progress "{{ .Path }}"
+        - find build/sysroot -type f | xargs -L 1 realpath --relative-to=build/sysroot > build/sysroot.manifest
+  - id: casaos-arm-7
+    binary: build/sysroot/usr/bin/casaos
     env:
+      - CGO_ENABLED=1
       - CC=arm-linux-gnueabihf-gcc
     ldflags:
-      - -X main.commit={{.Commit}}
-      - -X main.date={{.Date}}
       - -s
       - -w
       - -extldflags "-static"
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - arm
     goarm:
       - "7"
-  - id: casaos-migration-tool-amd64
-    binary: build/sysroot/usr/bin/casaos-migration-tool
     hooks:
       post:
-        - upx --best --lzma -v --no-progress "{{ .Path }}"
+        - find build/sysroot -type f | xargs -L 1 realpath --relative-to=build/sysroot > build/sysroot.manifest
+  - id: casaos-migration-tool-amd64
+    binary: build/sysroot/usr/bin/casaos-migration-tool
     main: ./cmd/migration-tool
     env:
+      - CGO_ENABLED=1
       - CC=x86_64-linux-gnu-gcc
     ldflags:
-      - -X main.commit={{.Commit}}
-      - -X main.date={{.Date}}
       - -s
       - -w
       - -extldflags "-static"
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - amd64
   - id: casaos-migration-tool-arm64
     binary: build/sysroot/usr/bin/casaos-migration-tool
-    hooks:
-      post:
-        - upx --best --lzma -v --no-progress "{{ .Path }}"
     main: ./cmd/migration-tool
     env:
+      - CGO_ENABLED=1
       - CC=aarch64-linux-gnu-gcc
     ldflags:
-      - -X main.commit={{.Commit}}
-      - -X main.date={{.Date}}
       - -s
       - -w
       - -extldflags "-static"
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
       - arm64
   - id: casaos-migration-tool-arm-7
     binary: build/sysroot/usr/bin/casaos-migration-tool
-    hooks:
-      post:
-        - upx --best --lzma -v --no-progress "{{ .Path }}"
     main: ./cmd/migration-tool
     env:
+      - CGO_ENABLED=1
       - CC=arm-linux-gnueabihf-gcc
     ldflags:
-      - -X main.commit={{.Commit}}
-      - -X main.date={{.Date}}
       - -s
       - -w
       - -extldflags "-static"
     tags:
       - musl
       - netgo
-      - osusergo
     goos:
       - linux
     goarch:
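For a local sanity check of a config like the one above, one possible GoReleaser dry run — this is only a sketch, assuming a GoReleaser v1.x install and the cross-compilers referenced by the `CC` entries being available; the flags other than `--rm-dist` are not taken from this diff:

```bash
# Build a snapshot locally without tagging or publishing anything.
goreleaser release --snapshot --rm-dist --skip-publish
```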
82  CHANGELOG.md

@@ -16,90 +16,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Security
 
 
-## [0.4.1] - 2023-1-19
-
-
-### Added
-- [Disk] Added disk merging feature in storage management (beta) that allows for multiple disks to be merged into a single storage space
-- [System] Added option for startpage.com search engine
-- [APP] Added app cloning feature in the app's context menu.
-### Changed
-- [APP] Improved app installation process, including display of the installation process, checks for successful installation, and prompts
-- [System] Binary sizes are 40%~60% smaller (thanks to upx)
-- [App] Optimization of install and update for certain country.
-- [All] Lots of bug fixes
-
-## [0.4.0] - 2022-12-13
-### Added
-
-- [Developer] Included `casaos-cli` command tool for debugging
-- [Developer] Added message bus for events and actions - Use `casaos-cli message-bus` to manage.
-- [Disk] Disk notification in Dashboard
-- [System] Restart/shutdown directly from CasaOS Dashboard
-### Changed
-
-- [General] CasaOS new logo!
-- [App] Redesign of Featured App
-- [App] Now you can choose to delete userdata along with app uninstallation
-
-### Security
-
-- [System] Fixed a shell injection issue for better security
-
 ### Fixed
 
-- [System] Re-instate default zone0 for CPU Temp ([#694](https://github.com/IceWhaleTech/CasaOS/issues/694))
-- [Disk] Fixed storage name with extra `-1` after rebooting ([#698](https://github.com/IceWhaleTech/CasaOS/issues/698))
-- [Disk] Fixed disk check so it does not impact disk going into idle ([#704](https://github.com/IceWhaleTech/CasaOS/issues/704))
-
-## [0.3.8] 2022-11-21
+## [0.3.6-alpha.1] - 2022-09-06
 
-### Added
-- [System] Add system announcement
-- [App] Allow to turn off the display of "Existing Docker Apps" in the settings.
-
-### Changed
-- [System] Improve the feedback function, you can submit feedback in the bottom right corner of WebUI.
-
-### Fixed
-- [System] Fix CPU Temp for other platforms ([#661](https://github.com/IceWhaleTech/CasaOS/issues/661))
-
-## [0.3.7.1] 2022-11-04
-
-### Fixed
-
-- Fix memory leak issue ([#658](https://github.com/IceWhaleTech/CasaOS/issues/658)[#646](https://github.com/IceWhaleTech/CasaOS/issues/646))
-- Solve the problem of local application import failure ([#490](https://github.com/IceWhaleTech/CasaOS/issues/490))
-
-## [0.3.7] 2022-10-28
-
-### Added
-- [Storage] Disk merge (Beta), you can merge multiple disks into a single storage space (currently you need to enable this feature from the command line)
-
-### Changed
-- [Files] Changed the cache file storage location, now the file upload size is not limited by the system disk capacity.
-- [Scripts] Updated installation and upgrade scripts to support more Debian-based Linux distributions.
-- [Engineering] Refactored Local Storage into a standalone service as part of CasaOS modularization.
-
-### Fixed
-- [Apps] App list update mechanism improved, now you can see the latest apps in App Store immediately.
-- [Storage] Fixed a lot of known issues
-
-### Added
-- [Storage] Disk merge (Beta), you can merge multiple disks into a single storage space (currently you need to enable this feature from the command line)
-
-### Changed
-- [Files] Changed the cache file storage location, now the file upload size is not limited by the system disk capacity.
-- [Scripts] Updated installation and upgrade scripts to support more Debian-based Linux distributions.
-- [Engineering] Refactored Local Storage into a standalone service as part of CasaOS modularization.
-
-### Fixed
-- [Apps] App list update mechanism improved, now you can see the latest apps in App Store immediately.
-- [Storage] Fixed a lot of known issues
-
-
-## [0.3.6] - 2022-09-06
 
 ### Added
 - [System] Added power and temperature info to performance widget (Intel)
50  README.md

@@ -7,9 +7,9 @@
 <p align="center">
 <!-- CasaOS Banner -->
     <picture>
-        <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_dark_night_800x300.png">
-        <source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_twilight_blue_800x300.png">
-        <img alt="CasaOS" src="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_twilight_blue_800x300.png">
+        <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_dark_night_800px.png">
+        <source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_twilight_blue_800px.png">
+        <img alt="CasaOS" src="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_twilight_blue_800px.png">
     </picture>
     <br/>
     <i>Connect with the community developing HOME CLOUD, creating self-sovereign, and defining the future of the distributed cloud.</i>
@@ -28,9 +28,6 @@
     <a href="https://github.com/IceWhaleTech/CasaOS/issues" target="_blank">
         <img alt="CasaOS Issues" src="https://img.shields.io/github/issues/IceWhaleTech/CasaOS?color=162453&style=flat-square&label=Issues" />
     </a>
-    <a href="https://codecov.io/gh/IceWhaleTech/CasaOS" >
-        <img src="https://codecov.io/gh/IceWhaleTech/CasaOS/branch/main/graph/badge.svg?token=l9uMKGlkxM"/>
-    </a>
     <a href="https://github.com/IceWhaleTech/CasaOS/stargazers" target="_blank">
         <img alt="CasaOS Stargazers" src="https://img.shields.io/github/stars/IceWhaleTech/CasaOS?color=162453&style=flat-square&label=Stars" />
     </a>
@@ -117,20 +114,19 @@ Community Support
 - Armbian 22.04 (✅ Tested)
 - Alpine (🚧 Not Fully Tested Yet)
 - OpenWrt (🚧 Not Fully Tested Yet)
-- ArchLinux (🚧 Not Fully Tested Yet)
 
 ### Quick Setup CasaOS
 
 Freshly install a system from the list above and run this command:
 
 ```sh
-wget -qO- https://get.casaos.io | sudo bash
+wget -qO- https://get.casaos.io | bash
 ```
 
 or
 
 ```sh
-curl -fsSL https://get.casaos.io | sudo bash
+curl -fsSL https://get.casaos.io | bash
 ```
 
 ### Uninstall CasaOS
@@ -145,7 +141,7 @@ casaos-uninstall
 Before v0.3.3
 
 ```sh
-curl -fsSL https://get.icewhale.io/casaos-uninstall.sh | sudo bash
+curl -fsSL https://get.icewhale.io/casaos-uninstall.sh | bash
 ```
 
 ## Community
@@ -166,8 +162,38 @@ We believes that through community-driven collaborative innovation and open comm
 
 CasaOS is a community-driven open source project and the people involved are CasaOS users. That means CasaOS will always need contributions from community members just like you!
 
-- See <https://wiki.casaos.io/en/contribute> for ways of contribution to CasaOS
-- See <https://wiki.casaos.io/en/contribute/development> if you want to be involved in code contribution specificially
+<details>
+<summary><b>How can I get involved? 🧐</b></summary>
+<p>
+
+### Coding 💻 (WIP)
+
+We are refining documentation that can be used for effective community collaboration. Feel free to start a discussion if you have a good idea.
+
+### Helping Users 💬
+
+If you have extensive knowledge of CasaOS and related areas. We highly encourage you to help others as much as you can in Discord and Discussions.
+
+Discord: [https://discord.gg/knqAbbBbeX](https://discord.gg/knqAbbBbeX)
+
+GitHub Discussions: [https://github.com/IceWhaleTech/CasaOS/discussions](https://github.com/IceWhaleTech/CasaOS/discussions)
+
+### Helping with Translations 🌍 (WIP)
+
+CasaOS officially supports English and Chinese. You are welcome to help make CasaOS available in more languages.
+
+### Performing Alpha Testing ⚠️
+
+Alpha testing is quality assurance testing that is engaged and driven by the community. It's a great way to get involved in contributing and experiencing the latest features before a new release.
+
+The documentation is being refined and you can contact @JohnGuan via [Discord](https://discord.gg/knqAbbBbeX). Ask to join the #casaos-alpha channel.
+
+### Writing Documentation 📖 (WIP)
+
+Help make our documentation better by writing new content for the CasaOS Wiki, correcting existing material, or translating content into new languages.
+
+</p>
+</details>
 
 ## Credits
 
@@ -1,116 +0,0 @@
openapi: 3.0.3

info:
  title: CasaOS API
  version: v2
  description: |
    <picture>
        <source media="(prefers-color-scheme: dark)" srcset="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_dark_night_800px.png">
        <source media="(prefers-color-scheme: light)" srcset="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_twilight_blue_800px.png">
        <img alt="CasaOS" src="https://raw.githubusercontent.com/IceWhaleTech/logo/main/casaos/casaos_banner_twilight_blue_800px.png">
    </picture>

    CasaOS API provides miscellaneous methods for different scenarios.

    For issues and discussions, please visit the [GitHub repository](https://github.com/IceWhaleTech/CasaOS) or join [our Discord](https://discord.gg/knqAbbBbeX).

servers:
  - url: /v2/casaos

tags:
  - name: Health methods
    description: |-
      (TODO)
  - name: File methods
    description: |-
      (TODO)

x-tagGroups:
  - name: Methods
    tags:
      - Health methods

security:
  - access_token: []

paths:
  /health/services:
    get:
      tags:
        - Health methods
      summary: Get service status
      description: |-
        Get running status of each `casaos-*` service.
      operationId: getHealthServices
      responses:
        "200":
          $ref: "#/components/responses/GetHealthServicesOK"
        "500":
          $ref: "#/components/responses/ResponseInternalServerError"
  /file/test:
    get:
      tags:
        - File methods
      summary: Test file methods
      description: |-
        Test file methods.
      operationId: getFileTest
      responses:
        "200":
          $ref: "#/components/responses/ResponseOK"
        "500":
          $ref: "#/components/responses/ResponseInternalServerError"
components:
  securitySchemes:
    access_token:
      type: apiKey
      in: header
      name: Authorization

  responses:
    ResponseOK:
      description: OK
      content:
        application/json:
          schema:
            $ref: "#/components/schemas/BaseResponse"

    ResponseInternalServerError:
      description: Internal Server Error
      content:
        application/json:
          schema:
            $ref: "#/components/schemas/BaseResponse"

    GetHealthServicesOK:
      description: OK
      content:
        application/json:
          schema:
            allOf:
              - $ref: "#/components/schemas/BaseResponse"
              - properties:
                  data:
                    $ref: "#/components/schemas/HealthServices"

  schemas:
    BaseResponse:
      properties:
        message:
          readOnly: true
          description: message returned by server side if there is any
          type: string
          example: ""

    HealthServices:
      properties:
        running:
          type: array
          items:
            type: string
            example: "casaos-gateway.service"
        not_running:
          type: array
          items:
            type: string
            example: "casaos.service"
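The removed spec documents a `GET /health/services` method under the `/v2/casaos` server path, authenticated with a token in the `Authorization` header. A rough example call, assuming the CasaOS gateway is reachable at `<casaos-host>` and `<token>` is a valid access token (both are placeholders, not values from this diff):

```bash
# Query the running/not_running casaos-* services described by HealthServices.
curl -H "Authorization: <token>" "http://<casaos-host>/v2/casaos/health/services"
```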
@@ -1,24 +0,0 @@
<!DOCTYPE html>
<html lang="en">

<head>
    <title>CasaOS | Developers</title>

    <meta charset="utf-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <link href="https://fonts.googleapis.com/css?family=Montserrat:300,400,700|Roboto:300,400,700" rel="stylesheet">

    <style>
        body {
            margin: 0;
            padding: 0;
        }
    </style>
</head>

<body>
    <redoc spec-url='casaos/openapi.yaml' expandResponses='all' jsonSampleExpandLevel='all'></redoc>
    <script src="https://cdn.jsdelivr.net/npm/redoc@next/bundles/redoc.standalone.js"> </script>
</body>

</html>
@@ -56,26 +56,12 @@ __is_migration_needed() {
     __is_version_gt "${version2}" "${version1}"
 }
 
-__get_download_domain(){
-    local region
-    # Use ipconfig.io/country and https://ifconfig.io/country_code to get the country code
-    region=$(curl --connect-timeout 2 -s ipconfig.io/country || echo "")
-    if [ "${region}" = "" ]; then
-        region=$(curl --connect-timeout 2 -s https://ifconfig.io/country_code || echo "")
-    fi
-    if [[ "${region}" = "China" ]] || [[ "${region}" = "CN" ]]; then
-        echo "https://casaos.oss-cn-shanghai.aliyuncs.com/"
-    else
-        echo "https://github.com/"
-    fi
-}
-
-DOWNLOAD_DOMAIN=$(__get_download_domain)
-
 BUILD_PATH=$(dirname "${BASH_SOURCE[0]}")/../../..
 SOURCE_ROOT=${BUILD_PATH}/sysroot
 
 APP_NAME="casaos"
+APP_NAME_FORMAL="CasaOS"
+#APP_NAME_FORMAL="casaos-alpha"
 
 # check if migration is needed
 SOURCE_BIN_PATH=${SOURCE_ROOT}/usr/bin
@@ -87,7 +73,7 @@ CURRENT_BIN_FILE=${CURRENT_BIN_PATH}/${APP_NAME}
 CURRENT_BIN_FILE_LEGACY=$(realpath -e ${CURRENT_BIN_PATH_LEGACY}/${APP_NAME} || which ${APP_NAME} || echo CURRENT_BIN_FILE_LEGACY_NOT_FOUND)
 
 SOURCE_VERSION="$(${SOURCE_BIN_FILE} -v)"
-CURRENT_VERSION="$(${CURRENT_BIN_FILE} -v || ${CURRENT_BIN_FILE_LEGACY} -v || (stat "${CURRENT_BIN_FILE_LEGACY}" >/dev/null && echo LEGACY_WITHOUT_VERSION) || echo CURRENT_VERSION_NOT_FOUND)"
+CURRENT_VERSION="$(${CURRENT_BIN_FILE} -v || ${CURRENT_BIN_FILE_LEGACY} -v || (stat "${CURRENT_BIN_FILE_LEGACY}" > /dev/null && echo LEGACY_WITHOUT_VERSION) || echo CURRENT_VERSION_NOT_FOUND)"
 
 __info_done "CURRENT_VERSION: ${CURRENT_VERSION}"
 __info_done "SOURCE_VERSION: ${SOURCE_VERSION}"
@@ -99,25 +85,6 @@ if [ "${NEED_MIGRATION}" = "false" ]; then
     exit 0
 fi
 
-ARCH="unknown"
-
-case $(uname -m) in
-x86_64)
-    ARCH="amd64"
-    ;;
-aarch64)
-    ARCH="arm64"
-    ;;
-armv7l)
-    ARCH="arm-7"
-    ;;
-*)
-    __error "Unsupported architecture"
-    ;;
-esac
-
-__info "ARCH: ${ARCH}"
-
 MIGRATION_SERVICE_DIR=${1}
 
 if [ -z "${MIGRATION_SERVICE_DIR}" ]; then
@@ -128,10 +95,10 @@ MIGRATION_PATH=()
 
 CURRENT_VERSION_FOUND="false"
 
-# a VERSION_PAIR looks like "v0.3.5 <url>"
+# a VERSION_PAIR looks like "v0.3.5 v0.3.6-alpha2"
 #
 # - "v0.3.5" is the current version installed on this host
-# - "<url>" is the url of the migration tool
+# - "v0.3.6-alpha2" is the version of the migration tool from GitHub
 while read -r VERSION_PAIR; do
     if [ -z "${VERSION_PAIR}" ]; then
         continue
@@ -140,36 +107,56 @@ while read -r VERSION_PAIR; do
     # obtain "v0.3.5" from "v0.3.5 v0.3.6-alpha2"
     VER1=$(echo "${VERSION_PAIR}" | cut -d' ' -f1)
 
-    # obtain "<url>" from "v0.3.5 <url>"
-    URL=$(eval echo "${VERSION_PAIR}" | cut -d' ' -f2)
+    # obtain "v0.3.6-alpha2" from "v0.3.5 v0.3.6-alpha2"
+    VER2=$(echo "${VERSION_PAIR}" | cut -d' ' -f2)
 
     if [ "${CURRENT_VERSION}" = "${VER1// /}" ] || [ "${CURRENT_VERSION}" = "LEGACY_WITHOUT_VERSION" ]; then
         CURRENT_VERSION_FOUND="true"
     fi
 
     if [ "${CURRENT_VERSION_FOUND}" = "true" ]; then
-        MIGRATION_PATH+=("${URL// /}")
+        MIGRATION_PATH+=("${VER2// /}")
     fi
-done <"${MIGRATION_LIST_FILE}"
+done < "${MIGRATION_LIST_FILE}"
 
 if [ ${#MIGRATION_PATH[@]} -eq 0 ]; then
     __warning "No migration path found from ${CURRENT_VERSION} to ${SOURCE_VERSION}"
     exit 0
 fi
 
+ARCH="unknown"
+
+case $(uname -m) in
+x86_64)
+    ARCH="amd64"
+    ;;
+aarch64)
+    ARCH="arm64"
+    ;;
+armv7l)
+    ARCH="arm-7"
+    ;;
+*)
+    __error "Unsupported architecture"
+    ;;
+esac
 
 pushd "${MIGRATION_SERVICE_DIR}"
 
-{
-    for URL in "${MIGRATION_PATH[@]}"; do
-        MIGRATION_TOOL_FILE=$(basename "${URL}")
+{ for VER2 in "${MIGRATION_PATH[@]}"; do
+        MIGRATION_TOOL_FILE=linux-"${ARCH}"-"${APP_NAME}"-migration-tool-"${VER2}".tar.gz
 
         if [ -f "${MIGRATION_TOOL_FILE}" ]; then
            __info "Migration tool ${MIGRATION_TOOL_FILE} exists. Skip downloading."
            continue
        fi
 
-        __info "Dowloading ${URL}..."
-        curl -fsSL -o "${MIGRATION_TOOL_FILE}" -O "${URL}"
+        # MIGRATION_TOOL_URL=http://192.168.2.197:8000/v1/package/migration?type=release&name="${APP_NAME_FORMAL}"&version=${VER2}&arch=${ARCH}
+        MIGRATION_TOOL_URL=https://github.com/IceWhaleTech/"${APP_NAME_FORMAL}"/releases/download/"${VER2}"/linux-"${ARCH}"-"${APP_NAME}"-migration-tool-"${VER2}".tar.gz
+        echo "Dowloading ${MIGRATION_TOOL_URL}..."
+        curl -sL -O "${MIGRATION_TOOL_URL}"
     done
 } || {
     popd
@@ -177,8 +164,8 @@ pushd "${MIGRATION_SERVICE_DIR}"
 }
 
 {
-    for URL in "${MIGRATION_PATH[@]}"; do
-        MIGRATION_TOOL_FILE=$(basename "${URL}")
+    for VER2 in "${MIGRATION_PATH[@]}"; do
+        MIGRATION_TOOL_FILE=linux-"${ARCH}"-"${APP_NAME}"-migration-tool-"${VER2}".tar.gz
         __info "Extracting ${MIGRATION_TOOL_FILE}..."
         tar zxvf "${MIGRATION_TOOL_FILE}" || __error "Failed to extract ${MIGRATION_TOOL_FILE}"
 
@@ -1,3 +1,4 @@
-LEGACY_WITHOUT_VERSION ${DOWNLOAD_DOMAIN}IceWhaleTech/CasaOS/releases/download/v0.3.6/linux-${ARCH}-casaos-migration-tool-v0.3.6.tar.gz
-v0.3.5 ${DOWNLOAD_DOMAIN}IceWhaleTech/CasaOS/releases/download/v0.3.6/linux-${ARCH}-casaos-migration-tool-v0.3.6.tar.gz
-v0.3.5.1 ${DOWNLOAD_DOMAIN}IceWhaleTech/CasaOS/releases/download/v0.3.6/linux-${ARCH}-casaos-migration-tool-v0.3.6.tar.gz
+LEGACY_WITHOUT_VERSION v0.3.6
+v0.3.5 v0.3.6
+v0.3.5.1 v0.3.6
+v0.3.6 v0.3.7
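Under the new format above, the second column is just a migration-tool version; the script shown earlier expands it into a GitHub release URL. A small sketch of that expansion for one entry, with values copied from the script above (the echoed URL is illustrative):

```bash
# "v0.3.5 v0.3.6" on an amd64 host resolves to this download URL:
APP_NAME=casaos
APP_NAME_FORMAL=CasaOS
ARCH=amd64      # amd64 | arm64 | arm-7, detected from uname -m
VER2=v0.3.6     # second column of the migration list entry
echo "https://github.com/IceWhaleTech/${APP_NAME_FORMAL}/releases/download/${VER2}/linux-${ARCH}-${APP_NAME}-migration-tool-${VER2}.tar.gz"
```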
@@ -18,9 +18,7 @@ __get_setup_script_directory_by_os_release() {
         } || {
             pushd "${ID}" >/dev/null
         } || {
-            [[ -n ${ID_LIKE} ]] && for ID in ${ID_LIKE}; do
-                pushd "${ID}" >/dev/null && break
-            done
+            pushd "${ID_LIKE}" >/dev/null
         } || {
             echo "Unsupported OS: ${ID} ${VERSION_CODENAME} (${ID_LIKE})"
             exit 1
@@ -47,7 +45,7 @@ SETUP_SCRIPT_FILEPATH="${SETUP_SCRIPT_DIRECTORY}/${SETUP_SCRIPT_FILENAME}"
 
 {
     echo "🟩 Running ${SETUP_SCRIPT_FILENAME}..."
-    $BASH "${SETUP_SCRIPT_FILEPATH}" "${BUILD_PATH}"
+    $SHELL "${SETUP_SCRIPT_FILEPATH}" "${BUILD_PATH}"
 } || {
     echo "🟥 ${SETUP_SCRIPT_FILENAME} failed."
     exit 1
@@ -1,40 +0,0 @@
#!/bin/bash
###
# @Author: LinkLeong link@icewhale.org
# @Date: 2022-08-25 11:41:22
# @LastEditors: LinkLeong
# @LastEditTime: 2022-08-31 17:54:17
# @FilePath: /CasaOS/build/scripts/setup/service.d/casaos/debian/setup-casaos.sh
# @Description:

# @Website: https://www.casaos.io
# Copyright (c) 2022 by icewhale, All Rights Reserved.
###

set -e

APP_NAME="casaos"

# copy config files
CONF_PATH=/etc/casaos
OLD_CONF_PATH=/etc/casaos.conf
CONF_FILE=${CONF_PATH}/${APP_NAME}.conf
CONF_FILE_SAMPLE=${CONF_PATH}/${APP_NAME}.conf.sample


if [ -f "${OLD_CONF_PATH}" ]; then
    echo "copy old conf"
    cp "${OLD_CONF_PATH}" "${CONF_FILE}"
fi
if [ ! -f "${CONF_FILE}" ]; then
    echo "Initializing config file..."
    cp -v "${CONF_FILE_SAMPLE}" "${CONF_FILE}"
fi

rm -rf /etc/systemd/system/casaos.service # remove old service file

systemctl daemon-reload

# enable service (without starting)
echo "Enabling service..."
systemctl enable --force --no-ask-password "${APP_NAME}.service"
@@ -31,10 +31,15 @@ if [ ! -f "${CONF_FILE}" ]; then
     cp -v "${CONF_FILE_SAMPLE}" "${CONF_FILE}"
 fi
 
-rm -rf /etc/systemd/system/casaos.service # remove old service file
+if systemctl is-active "${APP_NAME}.service" &>/dev/null ;then
+    echo "server started"
+else
+    # enable and start service
+    systemctl daemon-reload
 
-systemctl daemon-reload
+    echo "Enabling service..."
+    systemctl enable --force --no-ask-password "${APP_NAME}.service"
 
-# enable service (without starting)
-echo "Enabling service..."
-systemctl enable --force --no-ask-password "${APP_NAME}.service"
+    #echo "Starting service..."
+    #systemctl start --force --no-ask-password "${APP_NAME}.service"
+fi
@@ -1,6 +1,5 @@
 [Unit]
-After=casaos-message-bus.service
-After=rclone.service
+After=casaos-gateway.service
 ConditionFileNotEmpty=/etc/casaos/casaos.conf
 Description=CasaOS Main Service
 
@@ -1,11 +0,0 @@
[Unit]
Description=rclone

[Service]
ExecStartPre=/usr/bin/rm -f /tmp/rclone.sock
ExecStart=/usr/bin/rclone rcd --rc-addr unix:///tmp/rclone.sock --rc-no-auth
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
@@ -1,58 +0,0 @@
#!/bin/bash
###
# @Author: LinkLeong link@icewhale.org
# @Date: 2022-11-15 15:51:44
# @LastEditors: LinkLeong
# @LastEditTime: 2022-11-15 15:53:37
# @FilePath: /CasaOS/build/sysroot/usr/share/casaos/cleanup/script.d/03-cleanup-casaos.sh
# @Description:
# @Website: https://www.casaos.io
# Copyright (c) 2022 by icewhale, All Rights Reserved.
###

set -e

readonly APP_NAME_SHORT=casaos

__get_setup_script_directory_by_os_release() {
    pushd "$(dirname "${BASH_SOURCE[0]}")/../service.d/${APP_NAME_SHORT}" &>/dev/null

    {
        # shellcheck source=/dev/null
        {
            source /etc/os-release
            {
                pushd "${ID}"/"${VERSION_CODENAME}" &>/dev/null
            } || {
                pushd "${ID}" &>/dev/null
            } || {
                [[ -n ${ID_LIKE} ]] && for ID in ${ID_LIKE}; do
                    pushd "${ID}" >/dev/null && break
                done
            } || {
                echo "Unsupported OS: ${ID} ${VERSION_CODENAME} (${ID_LIKE})"
                exit 1
            }

            pwd

            popd &>/dev/null

        } || {
            echo "Unsupported OS: unknown"
            exit 1
        }

    }

    popd &>/dev/null
}

SETUP_SCRIPT_DIRECTORY=$(__get_setup_script_directory_by_os_release)

readonly SETUP_SCRIPT_DIRECTORY
readonly SETUP_SCRIPT_FILENAME="cleanup-${APP_NAME_SHORT}.sh"
readonly SETUP_SCRIPT_FILEPATH="${SETUP_SCRIPT_DIRECTORY}/${SETUP_SCRIPT_FILENAME}"

echo "🟩 Running ${SETUP_SCRIPT_FILENAME}..."
$SHELL "${SETUP_SCRIPT_FILEPATH}" "${BUILD_PATH}"
@@ -1,204 +0,0 @@
#!/bin/bash

set -e

readonly CASA_SERVICES=(
    "casaos.service"
    "devmon@devmon.service"
)

readonly CASA_EXEC=casaos
readonly CASA_CONF=/etc/casaos/casaos.conf
readonly CASA_URL=/var/run/casaos/casaos.url
readonly CASA_SERVICE_USR=/usr/lib/systemd/system/casaos.service
readonly CASA_SERVICE_LIB=/lib/systemd/system/casaos.service
readonly CASA_SERVICE_ETC=/etc/systemd/system/casaos.service

# Old Casa Files
readonly CASA_PATH=/casaOS
readonly CASA_CONF_PATH_OLD=/etc/casaos.conf

readonly aCOLOUR=(
    '\e[38;5;154m' # green | Lines, bullets and separators
    '\e[1m'        # Bold white | Main descriptions
    '\e[90m'       # Grey | Credits
    '\e[91m'       # Red | Update notifications Alert
    '\e[33m'       # Yellow | Emphasis
)

Show() {
    # OK
    if (($1 == 0)); then
        echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[0]} OK $COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
    # FAILED
    elif (($1 == 1)); then
        echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[3]}FAILED$COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
    # INFO
    elif (($1 == 2)); then
        echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[0]} INFO $COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
    # NOTICE
    elif (($1 == 3)); then
        echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[4]}NOTICE$COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
    fi
}

Warn() {
    echo -e "${aCOLOUR[3]}$1$COLOUR_RESET"
}

trap 'onCtrlC' INT
onCtrlC() {
    echo -e "${COLOUR_RESET}"
    exit 1
}

Detecting_CasaOS() {
    if [[ ! -x "$(command -v ${CASA_EXEC})" ]]; then
        Show 2 "CasaOS is not detected, exit the script."
        exit 1
    else
        Show 0 "This script will delete the containers you no longer use, and the CasaOS configuration files."
    fi
}

Uninstall_Container() {
    if [[ ${UNINSTALL_ALL_CONTAINER} == true && "$(docker ps -aq)" != "" ]]; then
        Show 2 "Start deleting containers."
        docker stop "$(docker ps -aq)" || Show 1 "Failed to stop all containers."
        docker rm "$(docker ps -aq)" || Show 1 "Failed to delete all containers."
    fi
}

Remove_Images() {
    if [[ ${REMOVE_IMAGES} == "all" && "$(docker images -q)" != "" ]]; then
        Show 2 "Start deleting all images."
        docker rmi "$(docker images -q)" || Show 1 "Failed to delete all images."
    elif [[ ${REMOVE_IMAGES} == "unuse" && "$(docker images -q)" != "" ]]; then
        Show 2 "Start deleting unuse images."
        docker image prune -af || Show 1 "Failed to delete unuse images."
    fi
}


Uninstall_Casaos() {

    for SERVICE in "${CASA_SERVICES[@]}"; do
        Show 2 "Stopping ${SERVICE}..."
        systemctl disable --now "${SERVICE}" || Show 3 "Failed to disable ${SERVICE}"
    done

    # Remove Service file
    if [[ -f ${CASA_SERVICE_USR} ]]; then
        rm -rvf ${CASA_SERVICE_USR}
    fi

    if [[ -f ${CASA_SERVICE_LIB} ]]; then
        rm -rvf ${CASA_SERVICE_LIB}
    fi

    if [[ -f ${CASA_SERVICE_ETC} ]]; then
        rm -rvf ${CASA_SERVICE_ETC}
    fi

    # Old Casa Files
    if [[ -d ${CASA_PATH} ]]; then
        rm -rvf ${CASA_PATH} || Show 1 "Failed to delete legacy CasaOS files."
    fi

    if [[ -f ${CASA_CONF_PATH_OLD} ]]; then
        rm -rvf ${CASA_CONF_PATH_OLD}
    fi

    # New Casa Files
    if [[ ${REMOVE_APP_DATA} = true ]]; then
        rm -rvf /DATA/AppData || Show 1 "Failed to delete AppData."
    fi

    rm -rvf "$(which ${CASA_EXEC})" || Show 3 "Failed to remove ${CASA_EXEC}"
    rm -rvf ${CASA_CONF} || Show 3 "Failed to remove ${CASA_CONF}"
    rm -rvf ${CASA_URL} || Show 3 "Failed to remove ${CASA_URL}"

    rm -rvf /var/lib/casaos/app_category.json
    rm -rvf /var/lib/casaos/app_list.json
    rm -rvf /var/lib/casaos/docker_root
}

Detecting_CasaOS

while true; do
    echo -n -e " ${aCOLOUR[4]}Do you want delete all containers? Y/n :${COLOUR_RESET}"
    read -r input
    case $input in
    [yY][eE][sS] | [yY])
        UNINSTALL_ALL_CONTAINER=true
        break
        ;;
    [nN][oO] | [nN])
        UNINSTALL_ALL_CONTAINER=false
        break
        ;;
    *)
        Warn " Invalid input..."
        ;;
    esac
done

if [[ ${UNINSTALL_ALL_CONTAINER} == true ]]; then
    while true; do
        echo -n -e " ${aCOLOUR[4]}Do you want delete all images? Y/n :${COLOUR_RESET}"
        read -r input
        case $input in
        [yY][eE][sS] | [yY])
            REMOVE_IMAGES="all"
            break
            ;;
        [nN][oO] | [nN])
            REMOVE_IMAGES="none"
            break
            ;;
        *)
            Warn " Invalid input..."
            ;;
        esac
    done

    while true; do
        echo -n -e " ${aCOLOUR[4]}Do you want delete all AppData of CasaOS? Y/n :${COLOUR_RESET}"
        read -r input
        case $input in
        [yY][eE][sS] | [yY])
            REMOVE_APP_DATA=true
            break
            ;;
        [nN][oO] | [nN])
            REMOVE_APP_DATA=false
            break
            ;;
        *)
            Warn " Invalid input..."
            ;;
        esac
    done
else
    while true; do
        echo -n -e " ${aCOLOUR[4]}Do you want to delete all images that are not used by the container? Y/n :${COLOUR_RESET}"
        read -r input
        case $input in
        [yY][eE][sS] | [yY])
            REMOVE_IMAGES="unuse"
            break
            ;;
        [nN][oO] | [nN])
            REMOVE_IMAGES="none"
            break
            ;;
        *)
            Warn " Invalid input..."
            ;;
        esac
    done
fi

Uninstall_Container
Remove_Images
Uninstall_Casaos
@@ -1 +0,0 @@
../cleanup-casaos.sh
@@ -1,204 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -e
|
|
||||||
|
|
||||||
readonly CASA_SERVICES=(
|
|
||||||
"casaos.service"
|
|
||||||
"devmon@devmon.service"
|
|
||||||
)
|
|
||||||
|
|
||||||
readonly CASA_EXEC=casaos
|
|
||||||
readonly CASA_CONF=/etc/casaos/casaos.conf
|
|
||||||
readonly CASA_URL=/var/run/casaos/casaos.url
|
|
||||||
readonly CASA_SERVICE_USR=/usr/lib/systemd/system/casaos.service
|
|
||||||
readonly CASA_SERVICE_LIB=/lib/systemd/system/casaos.service
|
|
||||||
readonly CASA_SERVICE_ETC=/etc/systemd/system/casaos.service
|
|
||||||
|
|
||||||
# Old Casa Files
|
|
||||||
readonly CASA_PATH=/casaOS
|
|
||||||
readonly CASA_CONF_PATH_OLD=/etc/casaos.conf
|
|
||||||
|
|
||||||
readonly aCOLOUR=(
|
|
||||||
'\e[38;5;154m' # green | Lines, bullets and separators
|
|
||||||
'\e[1m' # Bold white | Main descriptions
|
|
||||||
'\e[90m' # Grey | Credits
|
|
||||||
'\e[91m' # Red | Update notifications Alert
|
|
||||||
'\e[33m' # Yellow | Emphasis
|
|
||||||
)
|
|
||||||
|
|
||||||
Show() {
|
|
||||||
# OK
|
|
||||||
if (($1 == 0)); then
|
|
||||||
echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[0]} OK $COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
|
|
||||||
# FAILED
|
|
||||||
elif (($1 == 1)); then
|
|
||||||
echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[3]}FAILED$COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
|
|
||||||
# INFO
|
|
||||||
elif (($1 == 2)); then
|
|
||||||
echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[0]} INFO $COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
|
|
||||||
# NOTICE
|
|
||||||
elif (($1 == 3)); then
|
|
||||||
echo -e "${aCOLOUR[2]}[$COLOUR_RESET${aCOLOUR[4]}NOTICE$COLOUR_RESET${aCOLOUR[2]}]$COLOUR_RESET $2"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
Warn() {
|
|
||||||
echo -e "${aCOLOUR[3]}$1$COLOUR_RESET"
|
|
||||||
}
|
|
||||||
|
|
||||||
trap 'onCtrlC' INT
|
|
||||||
onCtrlC() {
|
|
||||||
echo -e "${COLOUR_RESET}"
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
Detecting_CasaOS() {
|
|
||||||
if [[ ! -x "$(command -v ${CASA_EXEC})" ]]; then
|
|
||||||
Show 2 "CasaOS is not detected, exiting the script."
|
|
||||||
exit 1
|
|
||||||
else
|
|
||||||
Show 0 "This script will delete the containers you no longer use, and the CasaOS configuration files."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
Uninstall_Container() {
|
|
||||||
if [[ ${UNINSTALL_ALL_CONTAINER} == true && "$(docker ps -aq)" != "" ]]; then
|
|
||||||
Show 2 "Start deleting containers."
|
|
||||||
docker stop "$(docker ps -aq)" || Show 1 "Failed to stop all containers."
|
|
||||||
docker rm "$(docker ps -aq)" || Show 1 "Failed to delete all containers."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
Remove_Images() {
|
|
||||||
if [[ ${REMOVE_IMAGES} == "all" && "$(docker images -q)" != "" ]]; then
|
|
||||||
Show 2 "Start deleting all images."
|
|
||||||
docker rmi "$(docker images -q)" || Show 1 "Failed to delete all images."
|
|
||||||
elif [[ ${REMOVE_IMAGES} == "unuse" && "$(docker images -q)" != "" ]]; then
|
|
||||||
Show 2 "Start deleting unused images."
|
|
||||||
docker image prune -af || Show 1 "Failed to delete unused images."
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
Uninstall_Casaos() {
|
|
||||||
|
|
||||||
for SERVICE in "${CASA_SERVICES[@]}"; do
|
|
||||||
Show 2 "Stopping ${SERVICE}..."
|
|
||||||
systemctl disable --now "${SERVICE}" || Show 3 "Failed to disable ${SERVICE}"
|
|
||||||
done
|
|
||||||
|
|
||||||
# Remove Service file
|
|
||||||
if [[ -f ${CASA_SERVICE_USR} ]]; then
|
|
||||||
rm -rvf ${CASA_SERVICE_USR}
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f ${CASA_SERVICE_LIB} ]]; then
|
|
||||||
rm -rvf ${CASA_SERVICE_LIB}
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f ${CASA_SERVICE_ETC} ]]; then
|
|
||||||
rm -rvf ${CASA_SERVICE_ETC}
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Old Casa Files
|
|
||||||
if [[ -d ${CASA_PATH} ]]; then
|
|
||||||
rm -rvf ${CASA_PATH} || Show 1 "Failed to delete legacy CasaOS files."
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [[ -f ${CASA_CONF_PATH_OLD} ]]; then
|
|
||||||
rm -rvf ${CASA_CONF_PATH_OLD}
|
|
||||||
fi
|
|
||||||
|
|
||||||
# New Casa Files
|
|
||||||
if [[ ${REMOVE_APP_DATA} = true ]]; then
|
|
||||||
rm -rvf /DATA/AppData || Show 1 "Failed to delete AppData."
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm -rvf "$(which ${CASA_EXEC})" || Show 3 "Failed to remove ${CASA_EXEC}"
|
|
||||||
rm -rvf ${CASA_CONF} || Show 3 "Failed to remove ${CASA_CONF}"
|
|
||||||
rm -rvf ${CASA_URL} || Show 3 "Failed to remove ${CASA_URL}"
|
|
||||||
|
|
||||||
rm -rvf /var/lib/casaos/app_category.json
|
|
||||||
rm -rvf /var/lib/casaos/app_list.json
|
|
||||||
rm -rvf /var/lib/casaos/docker_root
|
|
||||||
}
|
|
||||||
|
|
||||||
Detecting_CasaOS
|
|
||||||
|
|
||||||
while true; do
|
|
||||||
echo -n -e " ${aCOLOUR[4]}Do you want to delete all containers? Y/n :${COLOUR_RESET}"
|
|
||||||
read -r input
|
|
||||||
case $input in
|
|
||||||
[yY][eE][sS] | [yY])
|
|
||||||
UNINSTALL_ALL_CONTAINER=true
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
[nN][oO] | [nN])
|
|
||||||
UNINSTALL_ALL_CONTAINER=false
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
Warn " Invalid input..."
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
if [[ ${UNINSTALL_ALL_CONTAINER} == true ]]; then
|
|
||||||
while true; do
|
|
||||||
echo -n -e " ${aCOLOUR[4]}Do you want to delete all images? Y/n :${COLOUR_RESET}"
|
|
||||||
read -r input
|
|
||||||
case $input in
|
|
||||||
[yY][eE][sS] | [yY])
|
|
||||||
REMOVE_IMAGES="all"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
[nN][oO] | [nN])
|
|
||||||
REMOVE_IMAGES="none"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
Warn " Invalid input..."
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
|
|
||||||
while true; do
|
|
||||||
echo -n -e " ${aCOLOUR[4]}Do you want to delete all AppData of CasaOS? Y/n :${COLOUR_RESET}"
|
|
||||||
read -r input
|
|
||||||
case $input in
|
|
||||||
[yY][eE][sS] | [yY])
|
|
||||||
REMOVE_APP_DATA=true
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
[nN][oO] | [nN])
|
|
||||||
REMOVE_APP_DATA=false
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
Warn " Invalid input..."
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
else
|
|
||||||
while true; do
|
|
||||||
echo -n -e " ${aCOLOUR[4]}Do you want to delete all images that are not used by any container? Y/n :${COLOUR_RESET}"
|
|
||||||
read -r input
|
|
||||||
case $input in
|
|
||||||
[yY][eE][sS] | [yY])
|
|
||||||
REMOVE_IMAGES="unuse"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
[nN][oO] | [nN])
|
|
||||||
REMOVE_IMAGES="none"
|
|
||||||
break
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
Warn " Invalid input..."
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
Uninstall_Container
|
|
||||||
Remove_Images
|
|
||||||
Uninstall_Casaos
|
|
||||||
@@ -1 +0,0 @@
-../debian/cleanup-casaos.sh
@@ -1 +0,0 @@
-../../debian/bullseye/cleanup-casaos.sh
@@ -66,6 +66,18 @@ GetLocalJoinNetworks() {
   zerotier-cli listnetworks -j
 }
 
+# Remove the mount point and delete the mounted directory
+UMountPorintAndRemoveDir() {
+  DEVICE=$1
+  MOUNT_POINT=$(mount | grep ${DEVICE} | awk '{ print $3 }')
+  if [[ -z ${MOUNT_POINT} ]]; then
+    ${log} "Warning: ${DEVICE} is not mounted"
+  else
+    umount -lf ${DEVICE}
+    /bin/rmdir "${MOUNT_POINT}"
+  fi
+}
+
 # Format a FAT32 disk
 # param: the partition to format, e.g. /dev/sda1
 # param: the target filesystem format
@@ -121,7 +133,11 @@ GetPlugInDisk() {
   fdisk -l | grep 'Disk' | grep 'sd' | awk -F , '{print substr($1,11,3)}'
 }
 
+# Get the disk health status
+# param: disk path
+GetDiskHealthState() {
+  smartctl -H $1 | grep "SMART Health Status" | awk -F ":" '{print$2}'
+}
 
 # Get the number of bytes and sectors of a disk
 # param: disk path, e.g. /dev/sda
@@ -354,6 +370,19 @@ MountCIFS(){
   $sudo_cmd mount -t cifs -o username=$1,password=$6,port=$4 //$2/$3 $5
 }
 
+# $1: service name
+CheckServiceStatus(){
+  rs="`systemctl status $1 |grep -E 'Active|PID'`"
+  #echo "$rs"
+  run="`echo "$rs" |grep -B 2 'running'`"
+  fai="`echo "$rs" |grep -E -B 2 'failed|inactive|dead'`"
+  if [ "$run" == "" ]
+  then
+    echo "failed"
+  else
+    echo "running"
+  fi
+}
 UDEVILUmount(){
   $sudo_cmd udevil umount -f $1
 }
@@ -9,4 +9,4 @@
 ###
 
 
-curl -fsSL https://raw.githubusercontent.com/IceWhaleTech/get/main/update.sh | bash
+curl -fsSL https://raw.githubusercontent.com/LinkLeong/casaos-alpha/main/new.update.sh | bash
@@ -17,7 +17,7 @@ import (
 
 	interfaces "github.com/IceWhaleTech/CasaOS-Common"
 	"github.com/IceWhaleTech/CasaOS-Common/utils/systemctl"
-	"github.com/IceWhaleTech/CasaOS/common"
+	"github.com/IceWhaleTech/CasaOS-Gateway/common"
 	"github.com/IceWhaleTech/CasaOS/pkg/config"
 	"github.com/IceWhaleTech/CasaOS/pkg/sqlite"
 	"github.com/IceWhaleTech/CasaOS/service"
@@ -29,35 +29,40 @@ const (
 )
 
 var (
-	commit = "private build"
-	date   = "private build"
-
 	_logger  *Logger
 	sqliteDB *gorm.DB
+)
+
+var (
 	configFlag = ""
 	dbFlag     = ""
 )
 
 func init() {
+	config.InitSetup(configFlag)
+
+	if len(dbFlag) == 0 {
+		dbFlag = config.AppInfo.DBPath + "/db"
+	}
+
+	sqliteDB = sqlite.GetDb(dbFlag)
+	// gredis.GetRedisConn(config.RedisInfo),
+
+	service.MyService = service.NewService(sqliteDB, "")
+}
+
+func main() {
 	versionFlag := flag.Bool("v", false, "version")
 	debugFlag := flag.Bool("d", true, "debug")
 	forceFlag := flag.Bool("f", true, "force")
 
 	flag.Parse()
+	_logger = NewLogger()
 	if *versionFlag {
-		fmt.Println("v" + common.VERSION)
+		fmt.Println(common.Version)
 		os.Exit(0)
 	}
 
-	println("git commit:", commit)
-	println("build date:", date)
-
-	_logger = NewLogger()
-
 	if os.Getuid() != 0 {
-		_logger.Info("Root privileges are required to run this program.")
 		os.Exit(1)
 	}
 
@@ -77,21 +82,9 @@ func init() {
 		}
 	}
 
-	config.InitSetup(configFlag)
-
-	if len(dbFlag) == 0 {
-		dbFlag = config.AppInfo.DBPath + "/db"
-	}
-
-	sqliteDB = sqlite.GetDb(dbFlag)
-	// gredis.GetRedisConn(config.RedisInfo),
-
-	service.MyService = service.NewService(sqliteDB, "")
-}
-
-func main() {
 	migrationTools := []interfaces.MigrationTool{
-		// nothing to migrate from last version
+		NewMigrationToolFor_035(),
+		NewMigrationToolFor_036(),
 	}
 
 	var selectedMigrationTool interfaces.MigrationTool
@@ -122,7 +115,8 @@ func main() {
 		panic(err)
 	}
 
-	if err := selectedMigrationTool.PostMigrate(); err != nil {
-		_logger.Error("Migration succeeded, but post-migration failed: %s", err)
-	}
+	selectedMigrationTool.PostMigrate()
+	_logger.Info("casaos migration ok")
+	// panic(err)
 
 }
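For orientation, the migration-tool hunks above reduce to a select-then-run loop over migration tools. The sketch below is a self-contained mirror of that pattern, not the repository's code: the `MigrationTool` interface simply restates the four methods visible in this diff (it is not imported from CasaOS-Common), and `noopTool` is a hypothetical stand-in for the real `migrationTool` / `migrationTool036` implementations.

```go
// Sketch only: mirrors the select-then-run pattern used by cmd/migration-tool above.
package main

import "fmt"

// MigrationTool restates the methods visible in this diff; the real interface
// lives in github.com/IceWhaleTech/CasaOS-Common and may declare more.
type MigrationTool interface {
	IsMigrationNeeded() (bool, error)
	PreMigrate() error
	Migrate() error
	PostMigrate() error
}

// noopTool is a hypothetical stand-in for migrationTool / migrationTool036.
type noopTool struct{ needed bool }

func (t *noopTool) IsMigrationNeeded() (bool, error) { return t.needed, nil }
func (t *noopTool) PreMigrate() error                { return nil }
func (t *noopTool) Migrate() error                   { return nil }
func (t *noopTool) PostMigrate() error               { return nil }

func main() {
	tools := []MigrationTool{&noopTool{needed: false}, &noopTool{needed: true}}

	// Pick the first tool that reports a migration is needed.
	var selected MigrationTool
	for _, t := range tools {
		needed, err := t.IsMigrationNeeded()
		if err != nil {
			panic(err)
		}
		if needed {
			selected = t
			break
		}
	}
	if selected == nil {
		fmt.Println("nothing to migrate")
		return
	}

	// Run the three phases in order, stopping at the first failure.
	for _, step := range []func() error{selected.PreMigrate, selected.Migrate, selected.PostMigrate} {
		if err := step(); err != nil {
			panic(err)
		}
	}
	fmt.Println("migration ok")
}
```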
|||||||
182
cmd/migration-tool/migration-034-035.go
Normal file
@@ -0,0 +1,182 @@
|
|||||||
|
/*
|
||||||
|
* @Author: LinkLeong link@icewhale.org
|
||||||
|
* @Date: 2022-08-24 17:36:00
|
||||||
|
* @LastEditors: LinkLeong
|
||||||
|
* @LastEditTime: 2022-09-05 11:24:27
|
||||||
|
* @FilePath: /CasaOS/cmd/migration-tool/migration-034-035.go
|
||||||
|
* @Description:
|
||||||
|
* @Website: https://www.casaos.io
|
||||||
|
* Copyright (c) 2022 by icewhale, All Rights Reserved.
|
||||||
|
*/
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
interfaces "github.com/IceWhaleTech/CasaOS-Common"
|
||||||
|
"github.com/IceWhaleTech/CasaOS-Common/utils/version"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/pkg/config"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/pkg/utils/command"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/service"
|
||||||
|
)
|
||||||
|
|
||||||
|
type migrationTool036 struct{}
|
||||||
|
|
||||||
|
func (u *migrationTool036) IsMigrationNeeded() (bool, error) {
|
||||||
|
|
||||||
|
majorVersion, minorVersion, patchVersion, err := version.DetectLegacyVersion()
|
||||||
|
if err != nil {
|
||||||
|
if err == version.ErrLegacyVersionNotFound {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if majorVersion > 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if minorVersion > 3 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if minorVersion == 3 && patchVersion > 5 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
_logger.Info("Migration is needed for a CasaOS version 0.3.5 and older...")
|
||||||
|
return true, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *migrationTool036) PreMigrate() error {
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *migrationTool036) Migrate() error {
|
||||||
|
|
||||||
|
if service.MyService.System().GetSysInfo().KernelArch == "aarch64" && config.ServerInfo.USBAutoMount != "True" && strings.Contains(service.MyService.System().GetDeviceTree(), "Raspberry Pi") {
|
||||||
|
service.MyService.System().UpdateUSBAutoMount("False")
|
||||||
|
service.MyService.System().ExecUSBAutoMountShell("False")
|
||||||
|
}
|
||||||
|
newAPIUrl := "https://api.casaos.io/casaos-api"
|
||||||
|
if config.ServerInfo.ServerApi == "https://api.casaos.zimaboard.com" {
|
||||||
|
config.ServerInfo.ServerApi = newAPIUrl
|
||||||
|
config.Cfg.Section("server").Key("ServerApi").SetValue(newAPIUrl)
|
||||||
|
config.Cfg.SaveTo(config.SystemConfigInfo.ConfigPath)
|
||||||
|
}
|
||||||
|
command.OnlyExec("curl -fsSL https://raw.githubusercontent.com/IceWhaleTech/get/main/assist.sh | bash")
|
||||||
|
if !file.CheckNotExist("/casaOS") {
|
||||||
|
command.OnlyExec("source /casaOS/server/shell/update.sh ;")
|
||||||
|
command.OnlyExec("source " + config.AppInfo.ShellPath + "/delete-old-service.sh ;")
|
||||||
|
}
|
||||||
|
|
||||||
|
service.MyService.App().ImportApplications(true)
|
||||||
|
|
||||||
|
src := "/casaOS/server/conf/conf.ini"
|
||||||
|
if file.Exists(src) {
|
||||||
|
dst := "/etc/casaos/casaos.conf"
|
||||||
|
source, err := os.Open(src)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer source.Close()
|
||||||
|
|
||||||
|
destination, err := os.Create(dst)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer destination.Close()
|
||||||
|
_, err = io.Copy(destination, source)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if file.Exists("/casaOS/server/db") {
|
||||||
|
var fds []os.FileInfo
|
||||||
|
var err error
|
||||||
|
to := "/var/lib/casaos/db"
|
||||||
|
file.IsNotExistMkDir(to)
|
||||||
|
from := "/casaOS/server/db"
|
||||||
|
if fds, err = ioutil.ReadDir(from); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fd := range fds {
|
||||||
|
srcfp := path.Join(from, fd.Name())
|
||||||
|
dstfp := path.Join(to, fd.Name())
|
||||||
|
source, err := os.Open(srcfp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer source.Close()
|
||||||
|
|
||||||
|
destination, err := os.Create(dstfp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer destination.Close()
|
||||||
|
_, err = io.Copy(destination, source)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if file.Exists("/casaOS/server/conf") {
|
||||||
|
var fds []os.FileInfo
|
||||||
|
var err error
|
||||||
|
to := "/var/lib/casaos/conf"
|
||||||
|
file.IsNotExistMkDir(to)
|
||||||
|
from := "/casaOS/server/conf"
|
||||||
|
if fds, err = ioutil.ReadDir(from); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fd := range fds {
|
||||||
|
fExt := path.Ext(fd.Name())
|
||||||
|
if fExt != ".json" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
srcfp := path.Join(from, fd.Name())
|
||||||
|
dstfp := path.Join(to, fd.Name())
|
||||||
|
source, err := os.Open(srcfp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer source.Close()
|
||||||
|
|
||||||
|
destination, err := os.Create(dstfp)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer destination.Close()
|
||||||
|
_, err = io.Copy(destination, source)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
_logger.Info("update done")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *migrationTool036) PostMigrate() error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewMigrationToolFor_035() interfaces.MigrationTool {
|
||||||
|
return &migrationTool{}
|
||||||
|
}
|
||||||
@@ -11,13 +11,18 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"strings"
|
||||||
|
|
||||||
interfaces "github.com/IceWhaleTech/CasaOS-Common"
|
interfaces "github.com/IceWhaleTech/CasaOS-Common"
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/version"
|
"github.com/IceWhaleTech/CasaOS-Common/utils/version"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/pkg/config"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/service"
|
||||||
)
|
)
|
||||||
|
|
||||||
type migrationTool struct{}
|
type migrationTool struct{}
|
||||||
|
|
||||||
func (u *migrationTool) IsMigrationNeeded() (bool, error) {
|
func (u *migrationTool) IsMigrationNeeded() (bool, error) {
|
||||||
|
|
||||||
majorVersion, minorVersion, patchVersion, err := version.DetectLegacyVersion()
|
majorVersion, minorVersion, patchVersion, err := version.DetectLegacyVersion()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err == version.ErrLegacyVersionNotFound {
|
if err == version.ErrLegacyVersionNotFound {
|
||||||
@@ -41,13 +46,22 @@ func (u *migrationTool) IsMigrationNeeded() (bool, error) {
|
|||||||
|
|
||||||
_logger.Info("Migration is needed for a CasaOS version 0.3.5 and older...")
|
_logger.Info("Migration is needed for a CasaOS version 0.3.5 and older...")
|
||||||
return true, nil
|
return true, nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *migrationTool) PreMigrate() error {
|
func (u *migrationTool) PreMigrate() error {
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *migrationTool) Migrate() error {
|
func (u *migrationTool) Migrate() error {
|
||||||
|
|
||||||
|
if service.MyService.System().GetSysInfo().KernelArch == "aarch64" && config.ServerInfo.USBAutoMount != "True" && strings.Contains(service.MyService.System().GetDeviceTree(), "Raspberry Pi") {
|
||||||
|
service.MyService.System().UpdateUSBAutoMount("False")
|
||||||
|
service.MyService.System().ExecUSBAutoMountShell("False")
|
||||||
|
}
|
||||||
|
|
||||||
|
_logger.Info("update done")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -55,6 +69,6 @@ func (u *migrationTool) PostMigrate() error {
 	return nil
 }
 
-func NewMigrationDummy() interfaces.MigrationTool {
+func NewMigrationToolFor_036() interfaces.MigrationTool {
 	return &migrationTool{}
 }
@@ -1,210 +0,0 @@
|
|||||||
// Package codegen provides primitives to interact with the openapi HTTP API.
|
|
||||||
//
|
|
||||||
// Code generated by github.com/deepmap/oapi-codegen version v1.12.4 DO NOT EDIT.
|
|
||||||
package codegen
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"compress/gzip"
|
|
||||||
"encoding/base64"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/getkin/kin-openapi/openapi3"
|
|
||||||
"github.com/labstack/echo/v4"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
Access_tokenScopes = "access_token.Scopes"
|
|
||||||
)
|
|
||||||
|
|
||||||
// BaseResponse defines model for BaseResponse.
|
|
||||||
type BaseResponse struct {
|
|
||||||
// Message message returned by server side if there is any
|
|
||||||
Message *string `json:"message,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// HealthServices defines model for HealthServices.
|
|
||||||
type HealthServices struct {
|
|
||||||
NotRunning *[]string `json:"not_running,omitempty"`
|
|
||||||
Running *[]string `json:"running,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHealthServicesOK defines model for GetHealthServicesOK.
|
|
||||||
type GetHealthServicesOK struct {
|
|
||||||
Data *HealthServices `json:"data,omitempty"`
|
|
||||||
|
|
||||||
// Message message returned by server side if there is any
|
|
||||||
Message *string `json:"message,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResponseInternalServerError defines model for ResponseInternalServerError.
|
|
||||||
type ResponseInternalServerError = BaseResponse
|
|
||||||
|
|
||||||
// ResponseOK defines model for ResponseOK.
|
|
||||||
type ResponseOK = BaseResponse
|
|
||||||
|
|
||||||
// ServerInterface represents all server handlers.
|
|
||||||
type ServerInterface interface {
|
|
||||||
// Test file methods
|
|
||||||
// (GET /file/test)
|
|
||||||
GetFileTest(ctx echo.Context) error
|
|
||||||
// Get service status
|
|
||||||
// (GET /health/services)
|
|
||||||
GetHealthServices(ctx echo.Context) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// ServerInterfaceWrapper converts echo contexts to parameters.
|
|
||||||
type ServerInterfaceWrapper struct {
|
|
||||||
Handler ServerInterface
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetFileTest converts echo context to params.
|
|
||||||
func (w *ServerInterfaceWrapper) GetFileTest(ctx echo.Context) error {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
ctx.Set(Access_tokenScopes, []string{""})
|
|
||||||
|
|
||||||
// Invoke the callback with all the unmarshalled arguments
|
|
||||||
err = w.Handler.GetFileTest(ctx)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetHealthServices converts echo context to params.
|
|
||||||
func (w *ServerInterfaceWrapper) GetHealthServices(ctx echo.Context) error {
|
|
||||||
var err error
|
|
||||||
|
|
||||||
ctx.Set(Access_tokenScopes, []string{""})
|
|
||||||
|
|
||||||
// Invoke the callback with all the unmarshalled arguments
|
|
||||||
err = w.Handler.GetHealthServices(ctx)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is a simple interface which specifies echo.Route addition functions which
|
|
||||||
// are present on both echo.Echo and echo.Group, since we want to allow using
|
|
||||||
// either of them for path registration
|
|
||||||
type EchoRouter interface {
|
|
||||||
CONNECT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
DELETE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
GET(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
HEAD(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
OPTIONS(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
PATCH(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
POST(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
PUT(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
TRACE(path string, h echo.HandlerFunc, m ...echo.MiddlewareFunc) *echo.Route
|
|
||||||
}
|
|
||||||
|
|
||||||
// RegisterHandlers adds each server route to the EchoRouter.
|
|
||||||
func RegisterHandlers(router EchoRouter, si ServerInterface) {
|
|
||||||
RegisterHandlersWithBaseURL(router, si, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Registers handlers, and prepends BaseURL to the paths, so that the paths
|
|
||||||
// can be served under a prefix.
|
|
||||||
func RegisterHandlersWithBaseURL(router EchoRouter, si ServerInterface, baseURL string) {
|
|
||||||
|
|
||||||
wrapper := ServerInterfaceWrapper{
|
|
||||||
Handler: si,
|
|
||||||
}
|
|
||||||
|
|
||||||
router.GET(baseURL+"/file/test", wrapper.GetFileTest)
|
|
||||||
router.GET(baseURL+"/health/services", wrapper.GetHealthServices)
|
|
||||||
|
|
||||||
}
|
|
||||||
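As a usage note on the generated handler code removed above: oapi-codegen output of this shape is typically wired into an Echo server by implementing `ServerInterface` and passing it to `RegisterHandlers`. The sketch below is illustrative only; the `codegen` import path and the handler bodies are assumptions, not code from this repository.

```go
// Sketch only: wiring the generated ServerInterface into an Echo server.
package main

import (
	"net/http"

	"github.com/labstack/echo/v4"

	// Assumed import path for the generated package shown above.
	"github.com/IceWhaleTech/CasaOS/codegen"
)

// api is a hypothetical implementation of codegen.ServerInterface.
type api struct{}

// GetFileTest handles GET /file/test.
func (a *api) GetFileTest(ctx echo.Context) error {
	return ctx.JSON(http.StatusOK, codegen.BaseResponse{})
}

// GetHealthServices handles GET /health/services.
func (a *api) GetHealthServices(ctx echo.Context) error {
	running := []string{"casaos-gateway"}
	return ctx.JSON(http.StatusOK, codegen.GetHealthServicesOK{
		Data: &codegen.HealthServices{Running: &running},
	})
}

func main() {
	e := echo.New()
	codegen.RegisterHandlers(e, &api{}) // registers GET /file/test and GET /health/services
	e.Logger.Fatal(e.Start(":8080"))
}
```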
|
|
||||||
// Base64 encoded, gzipped, json marshaled Swagger object
|
|
||||||
var swaggerSpec = []string{
|
|
||||||
|
|
||||||
"H4sIAAAAAAAC/7xW70/jRhD9V1bTfoDKxBGoUmXpPnC9wiFUpSpIrUSi3GY9sfewd92ZcSBF/t+rXZtL",
|
|
||||||
"SCiC649PiffHe2/e7szsAxhfN96hE4bsAQi58Y4xfpyjfERdSXmFtLIGeXIZho13gk7CX900lTVarHfp",
|
|
||||||
"Z/YujLEpsdZxtqomS8huHuBbwiVk8E26YUv7dZy+14y/DrTQJQ/QkG+QxPYici0R7CWIpyqh67pZ13UJ",
|
|
||||||
"5MiGbBPkQQaTS+gSeKS6cILkdBV2If1E5OlNwb0+pH0lj9yqJ1c9+5a4Nxr9T7QEV7pkAIuOP9mR7Z5H",
|
|
||||||
"jcy6iBNPgYYJRSgtOczVYq24j49tjsoulZRIqCwr7daQAN7ruqkQMoAECHU+cdUaMqEWE5B1E2ZYyLqi",
|
|
||||||
"F75zzHvSnJc5tc6FDdkDWME6jm94jGbtecQ9BOyxfBnQRHodvl+Dd1RowTu9fj1uDIfRtGRlfRWs7yPQ",
|
|
||||||
"xiDzXPwtxiO2wdgSdY4ECThdB4zTVkpP9s94GzZcurGXuO6dsm7p909o2o7HJ6axRlrC+IFTp5RS/QT7",
|
|
||||||
"lgyqGnOr303hoCFcIvGR8ZWno3hBMFO5ptvDKSgmwyjvplCKNJylKem7UWGlbBctIw13d2R8nV4Y/K3U",
|
|
||||||
"FV6jKdPKFz6ttXVpb97wM19o55DmAX7ubFHK/IfxuLkfNa6YwteKrQLQf6hW7mykmC+qFl8WbOtC6SpI",
|
|
||||||
"+FGznlz1ov5/Rb2adOcWTF2vSp3+cqEa8iubI6vassGq0g59y6pGKX3OaulJ5Xa5REInig06TdbzKKCc",
|
|
||||||
"eVKWucWQ47nKLZuW2XrHiWoq1IxqZdlKKAXq5tzKx3ahCBvPVjytZwePbvRO7IffyzxUntRnb5268S2p",
|
|
||||||
"D5aNp3yzO+8HRkWR3ro/TheL9wv8/XA0jeliJebuJmBIYIXEfZKsjkO6+gadbixkcDIaj04ggUZLGXM0",
|
|
||||||
"XdoKU0GOhblA2U+0a2RRYdmjZyOIkBRT9iKHLPTWMxtiYonFb6vtHo/Hf1fUv6xLtzpFl8D3b9nyXOeL",
|
|
||||||
"9aita03r5/QH23TBkN3A2fbwLOxLy1iXU94qzM/aco6ihnqqWLS0rPxSoTal+jRU0u8+qQHmWct2OsDX",
|
|
||||||
"GPfco+bfdzCEOgQyhLplYc+/beJWN4jvpad94GbWzcKCQMZxvqUKMkhXx0P2Q1gwwO+6fnA9+TA53LSP",
|
|
||||||
"Hfbw4np5w5MTD0T3R6KLc/Jt0/MN637euyt7gc66vwIAAP//o5zNVnEKAAA=",
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSwagger returns the content of the embedded swagger specification file
|
|
||||||
// or error if failed to decode
|
|
||||||
func decodeSpec() ([]byte, error) {
|
|
||||||
zipped, err := base64.StdEncoding.DecodeString(strings.Join(swaggerSpec, ""))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error base64 decoding spec: %s", err)
|
|
||||||
}
|
|
||||||
zr, err := gzip.NewReader(bytes.NewReader(zipped))
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error decompressing spec: %s", err)
|
|
||||||
}
|
|
||||||
var buf bytes.Buffer
|
|
||||||
_, err = buf.ReadFrom(zr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("error decompressing spec: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var rawSpec = decodeSpecCached()
|
|
||||||
|
|
||||||
// a naive cached of a decoded swagger spec
|
|
||||||
func decodeSpecCached() func() ([]byte, error) {
|
|
||||||
data, err := decodeSpec()
|
|
||||||
return func() ([]byte, error) {
|
|
||||||
return data, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Constructs a synthetic filesystem for resolving external references when loading openapi specifications.
|
|
||||||
func PathToRawSpec(pathToFile string) map[string]func() ([]byte, error) {
|
|
||||||
var res = make(map[string]func() ([]byte, error))
|
|
||||||
if len(pathToFile) > 0 {
|
|
||||||
res[pathToFile] = rawSpec
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetSwagger returns the Swagger specification corresponding to the generated code
|
|
||||||
// in this file. The external references of Swagger specification are resolved.
|
|
||||||
// The logic of resolving external references is tightly connected to "import-mapping" feature.
|
|
||||||
// Externally referenced files must be embedded in the corresponding golang packages.
|
|
||||||
// Urls can be supported but this task was out of the scope.
|
|
||||||
func GetSwagger() (swagger *openapi3.T, err error) {
|
|
||||||
var resolvePath = PathToRawSpec("")
|
|
||||||
|
|
||||||
loader := openapi3.NewLoader()
|
|
||||||
loader.IsExternalRefsAllowed = true
|
|
||||||
loader.ReadFromURIFunc = func(loader *openapi3.Loader, url *url.URL) ([]byte, error) {
|
|
||||||
var pathToFile = url.String()
|
|
||||||
pathToFile = path.Clean(pathToFile)
|
|
||||||
getSpec, ok := resolvePath[pathToFile]
|
|
||||||
if !ok {
|
|
||||||
err1 := fmt.Errorf("path not found: %s", pathToFile)
|
|
||||||
return nil, err1
|
|
||||||
}
|
|
||||||
return getSpec()
|
|
||||||
}
|
|
||||||
var specData []byte
|
|
||||||
specData, err = rawSpec()
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
swagger, err = loader.LoadFromData(specData)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
@@ -1,7 +0,0 @@
|
|||||||
package common
|
|
||||||
|
|
||||||
const (
|
|
||||||
SERVICENAME = "casaos"
|
|
||||||
VERSION = "0.4.2"
|
|
||||||
BODY = " "
|
|
||||||
)
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
package common
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/codegen/message_bus"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// devtype -> action -> event
|
|
||||||
EventTypes map[string]map[string]message_bus.EventType
|
|
||||||
|
|
||||||
PropertyNameLookupMaps = map[string]map[string]string{
|
|
||||||
"system": {
|
|
||||||
fmt.Sprintf("%s:%s", SERVICENAME, "utilization"): "ID_BUS",
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
ActionPastTense = map[string]string{
|
|
||||||
"add": "added",
|
|
||||||
"remove": "removed",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
89
common/notify.go
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
CasaOSURLFilename = "casaos.url"
|
||||||
|
APICasaOSNotify = "/v1/notify"
|
||||||
|
)
|
||||||
|
|
||||||
|
type NotifyService interface {
|
||||||
|
SendNotify(path string, message map[string]interface{}) error
|
||||||
|
SendSystemStatusNotify(message map[string]interface{}) error
|
||||||
|
}
|
||||||
|
type notifyService struct {
|
||||||
|
address string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *notifyService) SendNotify(path string, message map[string]interface{}) error {
|
||||||
|
|
||||||
|
url := strings.TrimSuffix(n.address, "/") + APICasaOSNotify + "/" + path
|
||||||
|
body, err := json.Marshal(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
response, err := http.Post(url, "application/json", bytes.NewBuffer(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode != http.StatusOK {
|
||||||
|
return errors.New("failed to send notify (status code: " + fmt.Sprint(response.StatusCode) + ")")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// disk: "sys_disk":{"size":56866869248,"avail":5855485952,"health":true,"used":48099700736}
|
||||||
|
// usb: "sys_usb":[{"name": "sdc","size": 7747397632,"model": "DataTraveler_2.0","avail": 7714418688,"children": null}]
|
||||||
|
func (n *notifyService) SendSystemStatusNotify(message map[string]interface{}) error {
|
||||||
|
|
||||||
|
url := strings.TrimSuffix(n.address, "/") + APICasaOSNotify + "/system_status"
|
||||||
|
fmt.Println(url)
|
||||||
|
body, err := json.Marshal(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
response, err := http.Post(url, "application/json", bytes.NewBuffer(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode != http.StatusOK {
|
||||||
|
return errors.New("failed to send notify (status code: " + fmt.Sprint(response.StatusCode) + ")")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
func NewNotifyService(runtimePath string) (NotifyService, error) {
|
||||||
|
casaosAddressFile := filepath.Join(runtimePath, CasaOSURLFilename)
|
||||||
|
|
||||||
|
buf, err := os.ReadFile(casaosAddressFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
address := string(buf)
|
||||||
|
|
||||||
|
response, err := http.Get(address + "/ping")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode != 200 {
|
||||||
|
return nil, errors.New("failed to ping casaos service")
|
||||||
|
}
|
||||||
|
|
||||||
|
return ¬ifyService{
|
||||||
|
address: address,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
29
common/notify_test.go
Normal file
@@ -0,0 +1,29 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestSendNotify(t *testing.T) {
|
||||||
|
notify, err := NewNotifyService("/var/run/casaos")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
err = notify.SendNotify("test", map[string]interface{}{
|
||||||
|
"test": "test",
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSendSystemStatusNotify(t *testing.T) {
|
||||||
|
notify, err := NewNotifyService("/var/run/casaos")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
err = notify.SendSystemStatusNotify(map[string]interface{}{
|
||||||
|
"sys_usb": `[{"name": "sdc","size": 7747397632,"model": "DataTraveler_2.0","avail": 7714418688,"children": null}]`,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
78
common/share.go
Normal file
@@ -0,0 +1,78 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
APICasaOSShare = "/v1/samba/shares"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ShareService interface {
|
||||||
|
DeleteShare(id string) error
|
||||||
|
}
|
||||||
|
type shareService struct {
|
||||||
|
address string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *shareService) DeleteShare(id string) error {
|
||||||
|
url := strings.TrimSuffix(n.address, "/") + APICasaOSShare + "/" + id
|
||||||
|
fmt.Println(url)
|
||||||
|
message := "{}"
|
||||||
|
body, err := json.Marshal(message)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &http.Client{}
|
||||||
|
|
||||||
|
// Create request
|
||||||
|
req, err := http.NewRequest("DELETE", url, bytes.NewBuffer(body))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch Request
|
||||||
|
response, err := client.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer response.Body.Close()
|
||||||
|
|
||||||
|
if response.StatusCode != http.StatusOK {
|
||||||
|
return errors.New("failed to send share (status code: " + fmt.Sprint(response.StatusCode) + ")")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewShareService(runtimePath string) (ShareService, error) {
|
||||||
|
casaosAddressFile := filepath.Join(runtimePath, CasaOSURLFilename)
|
||||||
|
|
||||||
|
buf, err := os.ReadFile(casaosAddressFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
address := string(buf)
|
||||||
|
|
||||||
|
response, err := http.Get(address + "/ping")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode != 200 {
|
||||||
|
return nil, errors.New("failed to ping casaos service")
|
||||||
|
}
|
||||||
|
|
||||||
|
return &shareService{
|
||||||
|
address: address,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
14
common/share_test.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
package common
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestDeleteShare(t *testing.T) {
|
||||||
|
share, err := NewShareService("/var/run/casaos")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
err = share.DeleteShare("1")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
package drivers
|
|
||||||
|
|
||||||
import (
|
|
||||||
_ "github.com/IceWhaleTech/CasaOS/drivers/dropbox"
|
|
||||||
_ "github.com/IceWhaleTech/CasaOS/drivers/google_drive"
|
|
||||||
)
|
|
||||||
|
|
||||||
// All does nothing; it exists only so the driver packages can be imported
// for their side effects (same as a blank "_" import).
|
|
||||||
func All() {
|
|
||||||
|
|
||||||
}
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
package base
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
var NoRedirectClient *resty.Client
|
|
||||||
var RestyClient = NewRestyClient()
|
|
||||||
var HttpClient = &http.Client{}
|
|
||||||
var UserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
|
|
||||||
var DefaultTimeout = time.Second * 30
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
NoRedirectClient = resty.New().SetRedirectPolicy(
|
|
||||||
resty.RedirectPolicyFunc(func(req *http.Request, via []*http.Request) error {
|
|
||||||
return http.ErrUseLastResponse
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
NoRedirectClient.SetHeader("user-agent", UserAgent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRestyClient() *resty.Client {
|
|
||||||
return resty.New().
|
|
||||||
SetHeader("user-agent", UserAgent).
|
|
||||||
SetRetryCount(3).
|
|
||||||
SetTimeout(DefaultTimeout)
|
|
||||||
}
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
package base
|
|
||||||
|
|
||||||
import "github.com/go-resty/resty/v2"
|
|
||||||
|
|
||||||
type Json map[string]interface{}
|
|
||||||
|
|
||||||
type TokenResp struct {
|
|
||||||
AccessToken string `json:"access_token"`
|
|
||||||
RefreshToken string `json:"refresh_token"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type ReqCallback func(req *resty.Request)
|
|
||||||
@@ -1,100 +0,0 @@
|
|||||||
package dropbox
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/driver"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/utils"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Dropbox struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
AccessToken string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Config() driver.Config {
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Init(ctx context.Context) error {
|
|
||||||
if len(d.RefreshToken) == 0 {
|
|
||||||
d.getRefreshToken()
|
|
||||||
}
|
|
||||||
return d.refreshToken()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Drop(ctx context.Context) error {
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
files, err := d.getFiles(dir.GetID())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
|
||||||
return fileToObj(src), nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
url := "https://content.dropboxapi.com/2/files/download"
|
|
||||||
link := model.Link{
|
|
||||||
URL: url,
|
|
||||||
Method: http.MethodPost,
|
|
||||||
Header: http.Header{
|
|
||||||
"Authorization": []string{"Bearer " + d.AccessToken},
|
|
||||||
"Dropbox-API-Arg": []string{`{"path": "` + file.GetPath() + `"}`},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return &link, nil
|
|
||||||
}
|
|
||||||
func (d *Dropbox) GetUserInfo(ctx context.Context) (string, error) {
|
|
||||||
url := "https://api.dropboxapi.com/2/users/get_current_account"
|
|
||||||
user := UserInfo{}
|
|
||||||
resp, err := d.request(url, http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetHeader("Content-Type", "")
|
|
||||||
}, &user)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
logger.Info("resp", zap.Any("resp", string(resp)))
|
|
||||||
return user.Email, nil
|
|
||||||
}
|
|
||||||
func (d *Dropbox) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
return errors.New("not support")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Dropbox) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*Dropbox)(nil)
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
package dropbox
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/driver"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
const ICONURL = "./img/driver/Dropbox.svg"
|
|
||||||
const APPKEY = "tciqajyazzdygt9"
|
|
||||||
const APPSECRET = "e7gtmv441cwdf0n"
|
|
||||||
|
|
||||||
type Addition struct {
|
|
||||||
driver.RootID
|
|
||||||
RefreshToken string `json:"refresh_token" required:"true" omit:"true"`
|
|
||||||
AppKey string `json:"app_key" type:"string" default:"tciqajyazzdygt9" omit:"true"`
|
|
||||||
AppSecret string `json:"app_secret" type:"string" default:"e7gtmv441cwdf0n" omit:"true"`
|
|
||||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" omit:"true"`
|
|
||||||
AuthUrl string `json:"auth_url" type:"string" default:"https://www.dropbox.com/oauth2/authorize?client_id=tciqajyazzdygt9&redirect_uri=https://cloudoauth.files.casaos.app&response_type=code&token_access_type=offline&state=${HOST}%2Fv1%2Frecover%2FDropbox&&force_reapprove=true&force_reauthentication=true"`
|
|
||||||
Icon string `json:"icon" type:"string" default:"./img/driver/Dropbox.svg"`
|
|
||||||
Code string `json:"code" type:"string" help:"code from auth_url" omit:"true"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var config = driver.Config{
|
|
||||||
Name: "Dropbox",
|
|
||||||
OnlyProxy: true,
|
|
||||||
DefaultRoot: "root",
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
op.RegisterDriver(func() driver.Driver {
|
|
||||||
return &Dropbox{}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,88 +0,0 @@
|
|||||||
package dropbox
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type UserInfo struct {
|
|
||||||
AccountID string `json:"account_id"`
|
|
||||||
Name struct {
|
|
||||||
GivenName string `json:"given_name"`
|
|
||||||
Surname string `json:"surname"`
|
|
||||||
FamiliarName string `json:"familiar_name"`
|
|
||||||
DisplayName string `json:"display_name"`
|
|
||||||
AbbreviatedName string `json:"abbreviated_name"`
|
|
||||||
} `json:"name"`
|
|
||||||
Email string `json:"email"`
|
|
||||||
EmailVerified bool `json:"email_verified"`
|
|
||||||
Disabled bool `json:"disabled"`
|
|
||||||
Country string `json:"country"`
|
|
||||||
Locale string `json:"locale"`
|
|
||||||
ReferralLink string `json:"referral_link"`
|
|
||||||
IsPaired bool `json:"is_paired"`
|
|
||||||
AccountType struct {
|
|
||||||
Tag string `json:".tag"`
|
|
||||||
} `json:"account_type"`
|
|
||||||
RootInfo struct {
|
|
||||||
Tag string `json:".tag"`
|
|
||||||
RootNamespaceID string `json:"root_namespace_id"`
|
|
||||||
HomeNamespaceID string `json:"home_namespace_id"`
|
|
||||||
} `json:"root_info"`
|
|
||||||
}
|
|
||||||
type TokenError struct {
|
|
||||||
Error string `json:"error"`
|
|
||||||
ErrorDescription string `json:"error_description"`
|
|
||||||
}
|
|
||||||
type File struct {
|
|
||||||
Tag string `json:".tag"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
PathLower string `json:"path_lower"`
|
|
||||||
PathDisplay string `json:"path_display"`
|
|
||||||
ID string `json:"id"`
|
|
||||||
ClientModified time.Time `json:"client_modified,omitempty"`
|
|
||||||
ServerModified time.Time `json:"server_modified,omitempty"`
|
|
||||||
Rev string `json:"rev,omitempty"`
|
|
||||||
Size int `json:"size,omitempty"`
|
|
||||||
IsDownloadable bool `json:"is_downloadable,omitempty"`
|
|
||||||
ContentHash string `json:"content_hash,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Files struct {
|
|
||||||
Files []File `json:"entries"`
|
|
||||||
Cursor string `json:"cursor"`
|
|
||||||
HasMore bool `json:"has_more"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Error struct {
|
|
||||||
Error struct {
|
|
||||||
Errors []struct {
|
|
||||||
Domain string `json:"domain"`
|
|
||||||
Reason string `json:"reason"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
LocationType string `json:"location_type"`
|
|
||||||
Location string `json:"location"`
|
|
||||||
}
|
|
||||||
Code int `json:"code"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
} `json:"error"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func fileToObj(f File) *model.ObjThumb {
|
|
||||||
logger.Info("dropbox file", zap.Any("file", f))
|
|
||||||
obj := &model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
ID: f.ID,
|
|
||||||
Name: f.Name,
|
|
||||||
Size: int64(f.Size),
|
|
||||||
Modified: f.ClientModified,
|
|
||||||
IsFolder: f.Tag == "folder",
|
|
||||||
Path: f.PathDisplay,
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{},
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
@@ -1,102 +0,0 @@
|
|||||||
package dropbox
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/drivers/base"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (d *Dropbox) getRefreshToken() error {
|
|
||||||
url := "https://api.dropbox.com/oauth2/token"
|
|
||||||
var resp base.TokenResp
|
|
||||||
var e TokenError
|
|
||||||
|
|
||||||
res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
|
|
||||||
SetFormData(map[string]string{
|
|
||||||
"code": d.Code,
|
|
||||||
"grant_type": "authorization_code",
|
|
||||||
"redirect_uri": "https://cloudoauth.files.casaos.app",
|
|
||||||
}).SetBasicAuth(d.Addition.AppKey, d.Addition.AppSecret).SetHeader("Content-Type", "application/x-www-form-urlencoded").Post(url)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logger.Info("get refresh token", zap.String("res", res.String()))
|
|
||||||
if e.Error != "" {
|
|
||||||
return fmt.Errorf(e.Error)
|
|
||||||
}
|
|
||||||
d.RefreshToken = resp.RefreshToken
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
func (d *Dropbox) refreshToken() error {
|
|
||||||
url := "https://api.dropbox.com/oauth2/token"
|
|
||||||
var resp base.TokenResp
|
|
||||||
var e TokenError
|
|
||||||
|
|
||||||
res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
|
|
||||||
SetFormData(map[string]string{
|
|
||||||
"refresh_token": d.RefreshToken,
|
|
||||||
"grant_type": "refresh_token",
|
|
||||||
}).SetBasicAuth(d.Addition.AppKey, d.Addition.AppSecret).SetHeader("Content-Type", "application/x-www-form-urlencoded").Post(url)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logger.Info("get refresh token", zap.String("res", res.String()))
|
|
||||||
if e.Error != "" {
|
|
||||||
return fmt.Errorf(e.Error)
|
|
||||||
}
|
|
||||||
d.AccessToken = resp.AccessToken
|
|
||||||
return nil
|
|
||||||
|
|
||||||
}
|
|
||||||
func (d *Dropbox) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
|
||||||
req := base.RestyClient.R()
|
|
||||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
|
||||||
req.SetHeader("Content-Type", "application/json")
|
|
||||||
if callback != nil {
|
|
||||||
callback(req)
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
req.SetResult(resp)
|
|
||||||
}
|
|
||||||
var e Error
|
|
||||||
req.SetError(&e)
|
|
||||||
res, err := req.Execute(method, url)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if e.Error.Code != 0 {
|
|
||||||
if e.Error.Code == 401 {
|
|
||||||
err = d.refreshToken()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return d.request(url, method, callback, resp)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
|
|
||||||
}
|
|
||||||
return res.Body(), nil
|
|
||||||
}
|
|
||||||
func (d *Dropbox) getFiles(path string) ([]File, error) {
|
|
||||||
|
|
||||||
res := make([]File, 0)
|
|
||||||
var resp Files
|
|
||||||
body := base.Json{
|
|
||||||
"limit": 2000,
|
|
||||||
"path": path,
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := d.request("https://api.dropboxapi.com/2/files/list_folder", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetBody(body)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res = append(res, resp.Files...)
|
|
||||||
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
@@ -1,183 +0,0 @@
|
|||||||
package google_drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/drivers/base"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/driver"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/utils"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type GoogleDrive struct {
|
|
||||||
model.Storage
|
|
||||||
Addition
|
|
||||||
AccessToken string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Config() driver.Config {
|
|
||||||
return config
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) GetAddition() driver.Additional {
|
|
||||||
return &d.Addition
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Init(ctx context.Context) error {
|
|
||||||
if d.ChunkSize == 0 {
|
|
||||||
d.ChunkSize = 5
|
|
||||||
}
|
|
||||||
if len(d.RefreshToken) == 0 {
|
|
||||||
d.getRefreshToken()
|
|
||||||
}
|
|
||||||
return d.refreshToken()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Drop(ctx context.Context) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
|
|
||||||
files, err := d.getFiles(dir.GetID())
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return utils.SliceConvert(files, func(src File) (model.Obj, error) {
|
|
||||||
return fileToObj(src), nil
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
|
|
||||||
url := fmt.Sprintf("https://www.googleapis.com/drive/v3/files/%s?includeItemsFromAllDrives=true&supportsAllDrives=true", file.GetID())
|
|
||||||
_, err := d.request(url, http.MethodGet, nil, nil)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
link := model.Link{
|
|
||||||
Method: http.MethodGet,
|
|
||||||
URL: url + "&alt=media",
|
|
||||||
Header: http.Header{
|
|
||||||
"Authorization": []string{"Bearer " + d.AccessToken},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return &link, nil
|
|
||||||
}
|
|
||||||
func (d *GoogleDrive) GetUserInfo(ctx context.Context) (string, error) {
|
|
||||||
url := "https://content.googleapis.com/drive/v3/about?fields=user"
|
|
||||||
user := UserInfo{}
|
|
||||||
resp, err := d.request(url, http.MethodGet, nil, &user)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
logger.Info("resp", zap.Any("resp", resp))
|
|
||||||
return user.User.EmailAddress, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
|
|
||||||
data := base.Json{
|
|
||||||
"name": dirName,
|
|
||||||
"parents": []string{parentDir.GetID()},
|
|
||||||
"mimeType": "application/vnd.google-apps.folder",
|
|
||||||
}
|
|
||||||
_, err := d.request("https://www.googleapis.com/drive/v3/files", http.MethodPost, func(req *resty.Request) {
|
|
||||||
req.SetBody(data)
|
|
||||||
}, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
query := map[string]string{
|
|
||||||
"addParents": dstDir.GetID(),
|
|
||||||
"removeParents": "root",
|
|
||||||
}
|
|
||||||
url := "https://www.googleapis.com/drive/v3/files/" + srcObj.GetID()
|
|
||||||
_, err := d.request(url, http.MethodPatch, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Rename(ctx context.Context, srcObj model.Obj, newName string) error {
|
|
||||||
data := base.Json{
|
|
||||||
"name": newName,
|
|
||||||
}
|
|
||||||
url := "https://www.googleapis.com/drive/v3/files/" + srcObj.GetID()
|
|
||||||
_, err := d.request(url, http.MethodPatch, func(req *resty.Request) {
|
|
||||||
req.SetBody(data)
|
|
||||||
}, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Copy(ctx context.Context, srcObj, dstDir model.Obj) error {
|
|
||||||
return errors.New("not support")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
|
|
||||||
url := "https://www.googleapis.com/drive/v3/files/" + obj.GetID()
|
|
||||||
_, err := d.request(url, http.MethodDelete, nil, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
|
|
||||||
obj := stream.GetOld()
|
|
||||||
var (
|
|
||||||
e Error
|
|
||||||
url string
|
|
||||||
data base.Json
|
|
||||||
res *resty.Response
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if obj != nil {
|
|
||||||
url = fmt.Sprintf("https://www.googleapis.com/upload/drive/v3/files/%s?uploadType=resumable&supportsAllDrives=true", obj.GetID())
|
|
||||||
data = base.Json{}
|
|
||||||
} else {
|
|
||||||
data = base.Json{
|
|
||||||
"name": stream.GetName(),
|
|
||||||
"parents": []string{dstDir.GetID()},
|
|
||||||
}
|
|
||||||
url = "https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable&supportsAllDrives=true"
|
|
||||||
}
|
|
||||||
req := base.NoRedirectClient.R().
|
|
||||||
SetHeaders(map[string]string{
|
|
||||||
"Authorization": "Bearer " + d.AccessToken,
|
|
||||||
"X-Upload-Content-Type": stream.GetMimetype(),
|
|
||||||
"X-Upload-Content-Length": strconv.FormatInt(stream.GetSize(), 10),
|
|
||||||
}).
|
|
||||||
SetError(&e).SetBody(data).SetContext(ctx)
|
|
||||||
if obj != nil {
|
|
||||||
res, err = req.Patch(url)
|
|
||||||
} else {
|
|
||||||
res, err = req.Post(url)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if e.Error.Code != 0 {
|
|
||||||
if e.Error.Code == 401 {
|
|
||||||
err = d.refreshToken()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return d.Put(ctx, dstDir, stream, up)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
|
|
||||||
}
|
|
||||||
putUrl := res.Header().Get("location")
|
|
||||||
if stream.GetSize() < d.ChunkSize*1024*1024 {
|
|
||||||
_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
|
|
||||||
req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).SetBody(stream.GetReadCloser())
|
|
||||||
}, nil)
|
|
||||||
} else {
|
|
||||||
err = d.chunkUpload(ctx, stream, putUrl)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var _ driver.Driver = (*GoogleDrive)(nil)
|
|
||||||
@@ -1,35 +0,0 @@
|
|||||||
package google_drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/driver"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/op"
|
|
||||||
)
|
|
||||||
|
|
||||||
const ICONURL = "./img/driver/GoogleDrive.svg"
|
|
||||||
const CLIENTID = "921743327851-urr4f7jjfp4ts639evqb3i4m4qb4u4cc.apps.googleusercontent.com"
|
|
||||||
const CLIENTSECRET = "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"
|
|
||||||
|
|
||||||
type Addition struct {
|
|
||||||
driver.RootID
|
|
||||||
RefreshToken string `json:"refresh_token" required:"true" omit:"true"`
|
|
||||||
OrderBy string `json:"order_by" type:"string" help:"such as: folder,name,modifiedTime" omit:"true"`
|
|
||||||
OrderDirection string `json:"order_direction" type:"select" options:"asc,desc" omit:"true"`
|
|
||||||
ClientID string `json:"client_id" required:"true" default:"921743327851-urr4f7jjfp4ts639evqb3i4m4qb4u4cc.apps.googleusercontent.com" omit:"true"`
|
|
||||||
ClientSecret string `json:"client_secret" required:"true" default:"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" omit:"true"`
|
|
||||||
ChunkSize int64 `json:"chunk_size" type:"number" help:"chunk size while uploading (unit: MB)" omit:"true"`
|
|
||||||
AuthUrl string `json:"auth_url" type:"string" default:"https://accounts.google.com/o/oauth2/auth/oauthchooseaccount?response_type=code&client_id=921743327851-urr4f7jjfp4ts639evqb3i4m4qb4u4cc.apps.googleusercontent.com&redirect_uri=https%3A%2F%2Fcloudoauth.files.casaos.app&scope=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fdrive&access_type=offline&approval_prompt=force&state=${HOST}%2Fv1%2Frecover%2FGoogleDrive&service=lso&o2v=1&flowName=GeneralOAuthFlow"`
|
|
||||||
Icon string `json:"icon" type:"string" default:"./img/driver/GoogleDrive.svg"`
|
|
||||||
Code string `json:"code" type:"string" help:"code from auth_url" omit:"true"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var config = driver.Config{
|
|
||||||
Name: "GoogleDrive",
|
|
||||||
OnlyProxy: true,
|
|
||||||
DefaultRoot: "root",
|
|
||||||
}
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
op.RegisterDriver(func() driver.Driver {
|
|
||||||
return &GoogleDrive{}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,77 +0,0 @@
|
|||||||
package google_drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
type UserInfo struct {
|
|
||||||
User struct {
|
|
||||||
Kind string `json:"kind"`
|
|
||||||
DisplayName string `json:"displayName"`
|
|
||||||
PhotoLink string `json:"photoLink"`
|
|
||||||
Me bool `json:"me"`
|
|
||||||
PermissionID string `json:"permissionId"`
|
|
||||||
EmailAddress string `json:"emailAddress"`
|
|
||||||
} `json:"user"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type TokenError struct {
|
|
||||||
Error string `json:"error"`
|
|
||||||
ErrorDescription string `json:"error_description"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Files struct {
|
|
||||||
NextPageToken string `json:"nextPageToken"`
|
|
||||||
Files []File `json:"files"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type File struct {
|
|
||||||
Id string `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
MimeType string `json:"mimeType"`
|
|
||||||
ModifiedTime time.Time `json:"modifiedTime"`
|
|
||||||
Size string `json:"size"`
|
|
||||||
ThumbnailLink string `json:"thumbnailLink"`
|
|
||||||
ShortcutDetails struct {
|
|
||||||
TargetId string `json:"targetId"`
|
|
||||||
TargetMimeType string `json:"targetMimeType"`
|
|
||||||
} `json:"shortcutDetails"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func fileToObj(f File) *model.ObjThumb {
|
|
||||||
log.Debugf("google file: %+v", f)
|
|
||||||
size, _ := strconv.ParseInt(f.Size, 10, 64)
|
|
||||||
obj := &model.ObjThumb{
|
|
||||||
Object: model.Object{
|
|
||||||
ID: f.Id,
|
|
||||||
Name: f.Name,
|
|
||||||
Size: size,
|
|
||||||
Modified: f.ModifiedTime,
|
|
||||||
IsFolder: f.MimeType == "application/vnd.google-apps.folder",
|
|
||||||
},
|
|
||||||
Thumbnail: model.Thumbnail{},
|
|
||||||
}
|
|
||||||
if f.MimeType == "application/vnd.google-apps.shortcut" {
|
|
||||||
obj.ID = f.ShortcutDetails.TargetId
|
|
||||||
obj.IsFolder = f.ShortcutDetails.TargetMimeType == "application/vnd.google-apps.folder"
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
type Error struct {
|
|
||||||
Error struct {
|
|
||||||
Errors []struct {
|
|
||||||
Domain string `json:"domain"`
|
|
||||||
Reason string `json:"reason"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
LocationType string `json:"location_type"`
|
|
||||||
Location string `json:"location"`
|
|
||||||
}
|
|
||||||
Code int `json:"code"`
|
|
||||||
Message string `json:"message"`
|
|
||||||
} `json:"error"`
|
|
||||||
}
|
|
||||||
@@ -1,152 +0,0 @@
|
|||||||
package google_drive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/drivers/base"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/utils"
|
|
||||||
"github.com/go-resty/resty/v2"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// do others that not defined in Driver interface
|
|
||||||
|
|
||||||
func (d *GoogleDrive) getRefreshToken() error {
|
|
||||||
url := "https://www.googleapis.com/oauth2/v4/token"
|
|
||||||
var resp base.TokenResp
|
|
||||||
var e TokenError
|
|
||||||
res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
|
|
||||||
SetFormData(map[string]string{
|
|
||||||
"client_id": d.ClientID,
|
|
||||||
"client_secret": d.ClientSecret,
|
|
||||||
"code": d.Code,
|
|
||||||
"grant_type": "authorization_code",
|
|
||||||
"redirect_uri": "https://cloudoauth.files.casaos.app",
|
|
||||||
}).Post(url)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logger.Info("get refresh token", zap.String("res", res.String()))
|
|
||||||
if e.Error != "" {
|
|
||||||
return fmt.Errorf(e.Error)
|
|
||||||
}
|
|
||||||
d.RefreshToken = resp.RefreshToken
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) refreshToken() error {
|
|
||||||
url := "https://www.googleapis.com/oauth2/v4/token"
|
|
||||||
var resp base.TokenResp
|
|
||||||
var e TokenError
|
|
||||||
res, err := base.RestyClient.R().SetResult(&resp).SetError(&e).
|
|
||||||
SetFormData(map[string]string{
|
|
||||||
"client_id": d.ClientID,
|
|
||||||
"client_secret": d.ClientSecret,
|
|
||||||
"refresh_token": d.RefreshToken,
|
|
||||||
"grant_type": "refresh_token",
|
|
||||||
}).Post(url)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
log.Debug(res.String())
|
|
||||||
if e.Error != "" {
|
|
||||||
return fmt.Errorf(e.Error)
|
|
||||||
}
|
|
||||||
d.AccessToken = resp.AccessToken
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) request(url string, method string, callback base.ReqCallback, resp interface{}) ([]byte, error) {
|
|
||||||
req := base.RestyClient.R()
|
|
||||||
req.SetHeader("Authorization", "Bearer "+d.AccessToken)
|
|
||||||
req.SetQueryParam("includeItemsFromAllDrives", "true")
|
|
||||||
req.SetQueryParam("supportsAllDrives", "true")
|
|
||||||
if callback != nil {
|
|
||||||
callback(req)
|
|
||||||
}
|
|
||||||
if resp != nil {
|
|
||||||
req.SetResult(resp)
|
|
||||||
}
|
|
||||||
var e Error
|
|
||||||
req.SetError(&e)
|
|
||||||
res, err := req.Execute(method, url)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if e.Error.Code != 0 {
|
|
||||||
if e.Error.Code == 401 {
|
|
||||||
err = d.refreshToken()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return d.request(url, method, callback, resp)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
|
|
||||||
}
|
|
||||||
return res.Body(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) getFiles(id string) ([]File, error) {
|
|
||||||
pageToken := "first"
|
|
||||||
res := make([]File, 0)
|
|
||||||
for pageToken != "" {
|
|
||||||
if pageToken == "first" {
|
|
||||||
pageToken = ""
|
|
||||||
}
|
|
||||||
var resp Files
|
|
||||||
orderBy := "folder,name,modifiedTime desc"
|
|
||||||
if d.OrderBy != "" {
|
|
||||||
orderBy = d.OrderBy + " " + d.OrderDirection
|
|
||||||
}
|
|
||||||
query := map[string]string{
|
|
||||||
"orderBy": orderBy,
|
|
||||||
"fields": "files(id,name,mimeType,size,modifiedTime,thumbnailLink,shortcutDetails),nextPageToken",
|
|
||||||
"pageSize": "1000",
|
|
||||||
"q": fmt.Sprintf("'%s' in parents and trashed = false", id),
|
|
||||||
//"includeItemsFromAllDrives": "true",
|
|
||||||
//"supportsAllDrives": "true",
|
|
||||||
"pageToken": pageToken,
|
|
||||||
}
|
|
||||||
_, err := d.request("https://www.googleapis.com/drive/v3/files", http.MethodGet, func(req *resty.Request) {
|
|
||||||
req.SetQueryParams(query)
|
|
||||||
}, &resp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
pageToken = resp.NextPageToken
|
|
||||||
res = append(res, resp.Files...)
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *GoogleDrive) chunkUpload(ctx context.Context, stream model.FileStreamer, url string) error {
|
|
||||||
var defaultChunkSize = d.ChunkSize * 1024 * 1024
|
|
||||||
var finish int64 = 0
|
|
||||||
for finish < stream.GetSize() {
|
|
||||||
if utils.IsCanceled(ctx) {
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
chunkSize := stream.GetSize() - finish
|
|
||||||
if chunkSize > defaultChunkSize {
|
|
||||||
chunkSize = defaultChunkSize
|
|
||||||
}
|
|
||||||
_, err := d.request(url, http.MethodPut, func(req *resty.Request) {
|
|
||||||
req.SetHeaders(map[string]string{
|
|
||||||
"Content-Length": strconv.FormatInt(chunkSize, 10),
|
|
||||||
"Content-Range": fmt.Sprintf("bytes %d-%d/%d", finish, finish+chunkSize-1, stream.GetSize()),
|
|
||||||
}).SetBody(io.LimitReader(stream.GetReadCloser(), chunkSize)).SetContext(ctx)
|
|
||||||
}, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
finish += chunkSize
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
152
go.mod
152
go.mod
@@ -1,130 +1,56 @@
|
|||||||
module github.com/IceWhaleTech/CasaOS
|
module github.com/IceWhaleTech/CasaOS
|
||||||
|
|
||||||
go 1.19
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Curtis-Milo/nat-type-identifier-go v0.0.0-20220215191915-18d42168c63d
|
github.com/Curtis-Milo/nat-type-identifier-go v0.0.0-20220215191915-18d42168c63d
|
||||||
github.com/IceWhaleTech/CasaOS-Common v0.4.2-alpha3
|
github.com/IceWhaleTech/CasaOS-Common v0.0.0-20220929035515-b1287110d6d8
|
||||||
github.com/Xhofe/go-cache v0.0.0-20220723083548-714439c8af9a
|
github.com/IceWhaleTech/CasaOS-Gateway v0.3.6
|
||||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
|
github.com/Microsoft/go-winio v0.5.0 // indirect
|
||||||
github.com/deckarep/golang-set/v2 v2.1.0
|
github.com/ambelovsky/go-structs v1.1.0 // indirect
|
||||||
github.com/deepmap/oapi-codegen v1.12.4
|
github.com/ambelovsky/gosf v0.0.0-20201109201340-237aea4d6109
|
||||||
|
github.com/ambelovsky/gosf-socketio v0.0.0-20201109193639-add9d32f8b19 // indirect
|
||||||
|
github.com/containerd/containerd v1.5.7 // indirect
|
||||||
|
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
|
||||||
github.com/disintegration/imaging v1.6.2
|
github.com/disintegration/imaging v1.6.2
|
||||||
github.com/dsoprea/go-exif/v3 v3.0.0-20221012082141-d21ac8e2de85
|
github.com/docker/distribution v2.8.0+incompatible // indirect
|
||||||
github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd
|
github.com/docker/docker v20.10.7+incompatible
|
||||||
github.com/getkin/kin-openapi v0.113.0
|
github.com/docker/go-connections v0.4.0
|
||||||
|
github.com/dsoprea/go-exif/v3 v3.0.0-20210625224831-a6301f85c82b
|
||||||
|
github.com/dsoprea/go-logging v0.0.0-20200710184922-b02d349568dd // indirect
|
||||||
github.com/gin-contrib/gzip v0.0.6
|
github.com/gin-contrib/gzip v0.0.6
|
||||||
github.com/gin-gonic/gin v1.8.2
|
github.com/gin-gonic/gin v1.8.1
|
||||||
github.com/glebarez/sqlite v1.6.0
|
github.com/go-ini/ini v1.62.0
|
||||||
github.com/go-ini/ini v1.67.0
|
|
||||||
github.com/go-resty/resty/v2 v2.7.0
|
|
||||||
github.com/golang/mock v1.6.0
|
github.com/golang/mock v1.6.0
|
||||||
github.com/gomodule/redigo v1.8.9
|
github.com/gomodule/redigo v1.8.5
|
||||||
github.com/google/go-github/v36 v36.0.0
|
github.com/google/go-github/v36 v36.0.0
|
||||||
github.com/googollee/go-socket.io v1.6.2
|
github.com/googollee/go-socket.io v1.6.2
|
||||||
github.com/gorilla/websocket v1.5.0
|
github.com/gorilla/mux v1.8.0 // indirect
|
||||||
github.com/h2non/filetype v1.1.3
|
github.com/gorilla/websocket v1.4.2
|
||||||
github.com/hirochachacha/go-smb2 v1.1.0
|
github.com/hirochachacha/go-smb2 v1.1.0
|
||||||
github.com/json-iterator/go v1.1.12
|
github.com/jinzhu/copier v0.3.2
|
||||||
github.com/labstack/echo/v4 v4.10.0
|
github.com/lucas-clemente/quic-go v0.25.0
|
||||||
github.com/maruel/natural v1.1.0
|
|
||||||
github.com/mholt/archiver/v3 v3.5.1
|
github.com/mholt/archiver/v3 v3.5.1
|
||||||
github.com/moby/sys/mount v0.3.3
|
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||||
github.com/moby/sys/mountinfo v0.6.2
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
|
github.com/opencontainers/image-spec v1.0.2 // indirect
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/robfig/cron v1.2.0
|
github.com/robfig/cron v1.2.0
|
||||||
github.com/satori/go.uuid v1.2.0
|
github.com/satori/go.uuid v1.2.0
|
||||||
github.com/shirou/gopsutil/v3 v3.22.11
|
github.com/shirou/gopsutil/v3 v3.22.7
|
||||||
github.com/sirupsen/logrus v1.9.0
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/stretchr/testify v1.8.1
|
github.com/smartystreets/assertions v1.2.0 // indirect
|
||||||
github.com/tidwall/gjson v1.14.4
|
github.com/smartystreets/goconvey v1.6.4 // indirect
|
||||||
go.uber.org/zap v1.24.0
|
github.com/stretchr/testify v1.8.0
|
||||||
golang.org/x/crypto v0.5.0
|
github.com/tidwall/gjson v1.10.2
|
||||||
golang.org/x/oauth2 v0.3.0
|
go.uber.org/zap v1.21.0
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4
|
||||||
gorm.io/gorm v1.24.3
|
golang.org/x/mod v0.5.0 // indirect
|
||||||
gotest.tools v2.2.0+incompatible
|
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
|
||||||
)
|
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
|
||||||
|
golang.org/x/tools v0.1.7 // indirect
|
||||||
require (
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||||
github.com/andybalholm/brotli v1.0.1 // indirect
|
gorm.io/driver/sqlite v1.2.6
|
||||||
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
|
gorm.io/gorm v1.22.5
|
||||||
github.com/benbjohnson/clock v1.3.0 // indirect
|
|
||||||
github.com/coreos/go-systemd/v22 v22.3.2 // indirect
|
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
|
||||||
github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
|
|
||||||
github.com/dsoprea/go-utility/v2 v2.0.0-20221003172846-a3e1774ef349 // indirect
|
|
||||||
github.com/geoffgarside/ber v1.1.0 // indirect
|
|
||||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
|
||||||
github.com/glebarez/go-sqlite v1.20.0 // indirect
|
|
||||||
github.com/go-errors/errors v1.4.2 // indirect
|
|
||||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
|
||||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
|
||||||
github.com/go-openapi/swag v0.21.1 // indirect
|
|
||||||
github.com/go-playground/locales v0.14.0 // indirect
|
|
||||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
|
||||||
github.com/go-playground/validator/v10 v10.11.1 // indirect
|
|
||||||
github.com/goccy/go-json v0.9.11 // indirect
|
|
||||||
github.com/godbus/dbus/v5 v5.0.4 // indirect
|
|
||||||
github.com/gofrs/uuid v4.0.0+incompatible // indirect
|
|
||||||
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
|
|
||||||
github.com/golang-jwt/jwt/v4 v4.4.2 // indirect
|
|
||||||
github.com/golang/geo v0.0.0-20210211234256-740aa86cb551 // indirect
|
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
|
||||||
github.com/golang/snappy v0.0.4 // indirect
|
|
||||||
github.com/google/go-cmp v0.5.9 // indirect
|
|
||||||
github.com/google/go-querystring v1.0.0 // indirect
|
|
||||||
github.com/google/uuid v1.3.0 // indirect
|
|
||||||
github.com/gorilla/mux v1.8.0 // indirect
|
|
||||||
github.com/invopop/yaml v0.1.0 // indirect
|
|
||||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
|
||||||
github.com/jinzhu/now v1.1.5 // indirect
|
|
||||||
github.com/josharian/intern v1.0.0 // indirect
|
|
||||||
github.com/klauspost/compress v1.15.13 // indirect
|
|
||||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
|
||||||
github.com/labstack/gommon v0.4.0 // indirect
|
|
||||||
github.com/leodido/go-urn v1.2.1 // indirect
|
|
||||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
|
|
||||||
github.com/mailru/easyjson v0.7.7 // indirect
|
|
||||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
|
||||||
github.com/mattn/go-isatty v0.0.16 // indirect
|
|
||||||
github.com/mattn/go-sqlite3 v1.14.15 // indirect
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
|
||||||
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect
|
|
||||||
github.com/nwaples/rardecode v1.1.0 // indirect
|
|
||||||
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
|
|
||||||
github.com/perimeterx/marshmallow v1.1.4 // indirect
|
|
||||||
github.com/pierrec/lz4/v4 v4.1.2 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
|
||||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
|
||||||
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
|
|
||||||
github.com/tidwall/match v1.1.1 // indirect
|
|
||||||
github.com/tidwall/pretty v1.2.0 // indirect
|
|
||||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
|
||||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
|
||||||
github.com/ugorji/go/codec v1.2.7 // indirect
|
|
||||||
github.com/ulikunitz/xz v0.5.9 // indirect
|
|
||||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
|
||||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
|
||||||
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
|
|
||||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
|
||||||
go.uber.org/atomic v1.7.0 // indirect
|
|
||||||
go.uber.org/multierr v1.6.0 // indirect
|
|
||||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8 // indirect
|
|
||||||
golang.org/x/net v0.5.0 // indirect
|
|
||||||
golang.org/x/sys v0.4.0 // indirect
|
|
||||||
golang.org/x/text v0.6.0 // indirect
|
|
||||||
golang.org/x/time v0.2.0 // indirect
|
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
|
||||||
google.golang.org/protobuf v1.28.1 // indirect
|
|
||||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
|
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
|
||||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
|
||||||
modernc.org/libc v1.21.5 // indirect
|
|
||||||
modernc.org/mathutil v1.5.0 // indirect
|
|
||||||
modernc.org/memory v1.4.0 // indirect
|
|
||||||
modernc.org/sqlite v1.20.0 // indirect
|
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,43 +0,0 @@
|
|||||||
package conf
|
|
||||||
|
|
||||||
type Database struct {
|
|
||||||
Type string `json:"type" env:"DB_TYPE"`
|
|
||||||
Host string `json:"host" env:"DB_HOST"`
|
|
||||||
Port int `json:"port" env:"DB_PORT"`
|
|
||||||
User string `json:"user" env:"DB_USER"`
|
|
||||||
Password string `json:"password" env:"DB_PASS"`
|
|
||||||
Name string `json:"name" env:"DB_NAME"`
|
|
||||||
DBFile string `json:"db_file" env:"DB_FILE"`
|
|
||||||
TablePrefix string `json:"table_prefix" env:"DB_TABLE_PREFIX"`
|
|
||||||
SSLMode string `json:"ssl_mode" env:"DB_SSL_MODE"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Scheme struct {
|
|
||||||
Https bool `json:"https" env:"HTTPS"`
|
|
||||||
CertFile string `json:"cert_file" env:"CERT_FILE"`
|
|
||||||
KeyFile string `json:"key_file" env:"KEY_FILE"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type LogConfig struct {
|
|
||||||
Enable bool `json:"enable" env:"LOG_ENABLE"`
|
|
||||||
Name string `json:"name" env:"LOG_NAME"`
|
|
||||||
MaxSize int `json:"max_size" env:"MAX_SIZE"`
|
|
||||||
MaxBackups int `json:"max_backups" env:"MAX_BACKUPS"`
|
|
||||||
MaxAge int `json:"max_age" env:"MAX_AGE"`
|
|
||||||
Compress bool `json:"compress" env:"COMPRESS"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
Force bool `json:"force" env:"FORCE"`
|
|
||||||
Address string `json:"address" env:"ADDR"`
|
|
||||||
Port int `json:"port" env:"PORT"`
|
|
||||||
SiteURL string `json:"site_url" env:"SITE_URL"`
|
|
||||||
Cdn string `json:"cdn" env:"CDN"`
|
|
||||||
JwtSecret string `json:"jwt_secret" env:"JWT_SECRET"`
|
|
||||||
TokenExpiresIn int `json:"token_expires_in" env:"TOKEN_EXPIRES_IN"`
|
|
||||||
Database Database `json:"database"`
|
|
||||||
Scheme Scheme `json:"scheme"`
|
|
||||||
TempDir string `json:"temp_dir" env:"TEMP_DIR"`
|
|
||||||
BleveDir string `json:"bleve_dir" env:"BLEVE_DIR"`
|
|
||||||
Log LogConfig `json:"log"`
|
|
||||||
}
|
|
||||||
@@ -1,72 +0,0 @@
|
|||||||
package conf
|
|
||||||
|
|
||||||
const (
|
|
||||||
TypeString = "string"
|
|
||||||
TypeSelect = "select"
|
|
||||||
TypeBool = "bool"
|
|
||||||
TypeText = "text"
|
|
||||||
TypeNumber = "number"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// site
|
|
||||||
VERSION = "version"
|
|
||||||
ApiUrl = "api_url"
|
|
||||||
BasePath = "base_path"
|
|
||||||
SiteTitle = "site_title"
|
|
||||||
Announcement = "announcement"
|
|
||||||
AllowIndexed = "allow_indexed"
|
|
||||||
|
|
||||||
Logo = "logo"
|
|
||||||
Favicon = "favicon"
|
|
||||||
MainColor = "main_color"
|
|
||||||
|
|
||||||
// preview
|
|
||||||
TextTypes = "text_types"
|
|
||||||
AudioTypes = "audio_types"
|
|
||||||
VideoTypes = "video_types"
|
|
||||||
ImageTypes = "image_types"
|
|
||||||
ProxyTypes = "proxy_types"
|
|
||||||
ProxyIgnoreHeaders = "proxy_ignore_headers"
|
|
||||||
AudioAutoplay = "audio_autoplay"
|
|
||||||
VideoAutoplay = "video_autoplay"
|
|
||||||
|
|
||||||
// global
|
|
||||||
HideFiles = "hide_files"
|
|
||||||
CustomizeHead = "customize_head"
|
|
||||||
CustomizeBody = "customize_body"
|
|
||||||
LinkExpiration = "link_expiration"
|
|
||||||
SignAll = "sign_all"
|
|
||||||
PrivacyRegs = "privacy_regs"
|
|
||||||
OcrApi = "ocr_api"
|
|
||||||
FilenameCharMapping = "filename_char_mapping"
|
|
||||||
|
|
||||||
// index
|
|
||||||
SearchIndex = "search_index"
|
|
||||||
AutoUpdateIndex = "auto_update_index"
|
|
||||||
IndexPaths = "index_paths"
|
|
||||||
IgnorePaths = "ignore_paths"
|
|
||||||
|
|
||||||
// aria2
|
|
||||||
Aria2Uri = "aria2_uri"
|
|
||||||
Aria2Secret = "aria2_secret"
|
|
||||||
|
|
||||||
// single
|
|
||||||
Token = "token"
|
|
||||||
IndexProgress = "index_progress"
|
|
||||||
|
|
||||||
//Github
|
|
||||||
GithubClientId = "github_client_id"
|
|
||||||
GithubClientSecrets = "github_client_secrets"
|
|
||||||
GithubLoginEnabled = "github_login_enabled"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
UNKNOWN = iota
|
|
||||||
FOLDER
|
|
||||||
//OFFICE
|
|
||||||
VIDEO
|
|
||||||
AUDIO
|
|
||||||
TEXT
|
|
||||||
IMAGE
|
|
||||||
)
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
package conf
|
|
||||||
|
|
||||||
import "regexp"
|
|
||||||
|
|
||||||
var (
|
|
||||||
BuiltAt string
|
|
||||||
GoVersion string
|
|
||||||
GitAuthor string
|
|
||||||
GitCommit string
|
|
||||||
Version string = "dev"
|
|
||||||
WebVersion string
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
Conf *Config
|
|
||||||
)
|
|
||||||
|
|
||||||
var SlicesMap = make(map[string][]string)
|
|
||||||
var FilenameCharMap = make(map[string]string)
|
|
||||||
var PrivacyReg []*regexp.Regexp
|
|
||||||
|
|
||||||
var (
|
|
||||||
// StoragesLoaded loaded success if empty
|
|
||||||
StoragesLoaded = false
|
|
||||||
)
|
|
||||||
var (
|
|
||||||
RawIndexHtml string
|
|
||||||
ManageHtml string
|
|
||||||
IndexHtml string
|
|
||||||
)
|
|
||||||
@@ -1,25 +0,0 @@
|
|||||||
/*
|
|
||||||
* @Author: a624669980@163.com a624669980@163.com
|
|
||||||
* @Date: 2022-12-13 11:05:05
|
|
||||||
* @LastEditors: a624669980@163.com a624669980@163.com
|
|
||||||
* @LastEditTime: 2022-12-13 11:05:13
|
|
||||||
* @FilePath: /drive/internal/driver/config.go
|
|
||||||
* @Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
|
|
||||||
*/
|
|
||||||
package driver
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
LocalSort bool `json:"local_sort"`
|
|
||||||
OnlyLocal bool `json:"only_local"`
|
|
||||||
OnlyProxy bool `json:"only_proxy"`
|
|
||||||
NoCache bool `json:"no_cache"`
|
|
||||||
NoUpload bool `json:"no_upload"`
|
|
||||||
NeedMs bool `json:"need_ms"` // if need get message from user, such as validate code
|
|
||||||
DefaultRoot string `json:"default_root"`
|
|
||||||
CheckStatus bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Config) MustProxy() bool {
|
|
||||||
return c.OnlyProxy || c.OnlyLocal
|
|
||||||
}
|
|
||||||
@@ -1,131 +0,0 @@
|
|||||||
package driver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Driver interface {
|
|
||||||
Meta
|
|
||||||
Reader
|
|
||||||
User
|
|
||||||
//Writer
|
|
||||||
//Other
|
|
||||||
}
|
|
||||||
|
|
||||||
type Meta interface {
|
|
||||||
Config() Config
|
|
||||||
// GetStorage just get raw storage, no need to implement, because model.Storage have implemented
|
|
||||||
GetStorage() *model.Storage
|
|
||||||
SetStorage(model.Storage)
|
|
||||||
// GetAddition Additional is used for unmarshal of JSON, so need return pointer
|
|
||||||
GetAddition() Additional
|
|
||||||
// Init If already initialized, drop first
|
|
||||||
Init(ctx context.Context) error
|
|
||||||
Drop(ctx context.Context) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type Other interface {
|
|
||||||
Other(ctx context.Context, args model.OtherArgs) (interface{}, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type Reader interface {
|
|
||||||
// List files in the path
|
|
||||||
// if identify files by path, need to set ID with path,like path.Join(dir.GetID(), obj.GetName())
|
|
||||||
// if identify files by id, need to set ID with corresponding id
|
|
||||||
List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error)
|
|
||||||
// Link get url/filepath/reader of file
|
|
||||||
Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error)
|
|
||||||
}
|
|
||||||
type User interface {
|
|
||||||
// GetRoot get root directory of user
|
|
||||||
GetUserInfo(ctx context.Context) (string, error)
|
|
||||||
}
|
|
||||||
type Getter interface {
|
|
||||||
GetRoot(ctx context.Context) (model.Obj, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
//type Writer interface {
|
|
||||||
// Mkdir
|
|
||||||
// Move
|
|
||||||
// Rename
|
|
||||||
// Copy
|
|
||||||
// Remove
|
|
||||||
// Put
|
|
||||||
//}
|
|
||||||
|
|
||||||
type Mkdir interface {
|
|
||||||
MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type Move interface {
|
|
||||||
Move(ctx context.Context, srcObj, dstDir model.Obj) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type Rename interface {
|
|
||||||
Rename(ctx context.Context, srcObj model.Obj, newName string) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type Copy interface {
|
|
||||||
Copy(ctx context.Context, srcObj, dstDir model.Obj) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type Remove interface {
|
|
||||||
Remove(ctx context.Context, obj model.Obj) error
|
|
||||||
}
|
|
||||||
|
|
||||||
type Put interface {
|
|
||||||
Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) error
|
|
||||||
}
|
|
||||||
|
|
||||||
//type WriteResult interface {
|
|
||||||
// MkdirResult
|
|
||||||
// MoveResult
|
|
||||||
// RenameResult
|
|
||||||
// CopyResult
|
|
||||||
// PutResult
|
|
||||||
// Remove
|
|
||||||
//}
|
|
||||||
|
|
||||||
type MkdirResult interface {
|
|
||||||
MakeDir(ctx context.Context, parentDir model.Obj, dirName string) (model.Obj, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type MoveResult interface {
|
|
||||||
Move(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type RenameResult interface {
|
|
||||||
Rename(ctx context.Context, srcObj model.Obj, newName string) (model.Obj, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type CopyResult interface {
|
|
||||||
Copy(ctx context.Context, srcObj, dstDir model.Obj) (model.Obj, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type PutResult interface {
|
|
||||||
Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up UpdateProgress) (model.Obj, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type UpdateProgress func(percentage int)
|
|
||||||
|
|
||||||
type Progress struct {
|
|
||||||
Total int64
|
|
||||||
Done int64
|
|
||||||
up UpdateProgress
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Progress) Write(b []byte) (n int, err error) {
|
|
||||||
n = len(b)
|
|
||||||
p.Done += int64(n)
|
|
||||||
p.up(int(float64(p.Done) / float64(p.Total) * 100))
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewProgress(total int64, up UpdateProgress) *Progress {
|
|
||||||
return &Progress{
|
|
||||||
Total: total,
|
|
||||||
up: up,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,56 +0,0 @@
|
|||||||
/*
|
|
||||||
* @Author: a624669980@163.com a624669980@163.com
|
|
||||||
* @Date: 2022-12-13 11:05:47
|
|
||||||
* @LastEditors: a624669980@163.com a624669980@163.com
|
|
||||||
* @LastEditTime: 2022-12-13 11:05:54
|
|
||||||
* @FilePath: /drive/internal/driver/item.go
|
|
||||||
* @Description: 这是默认设置,请设置`customMade`, 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE
|
|
||||||
*/
|
|
||||||
package driver
|
|
||||||
|
|
||||||
type Additional interface{}
|
|
||||||
|
|
||||||
type Select string
|
|
||||||
|
|
||||||
type Item struct {
|
|
||||||
Name string `json:"name"`
|
|
||||||
Type string `json:"type"`
|
|
||||||
Default string `json:"default"`
|
|
||||||
Options string `json:"options"`
|
|
||||||
Required bool `json:"required"`
|
|
||||||
Help string `json:"help"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Info struct {
|
|
||||||
Common []Item `json:"common"`
|
|
||||||
Additional []Item `json:"additional"`
|
|
||||||
Config Config `json:"config"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type IRootPath interface {
|
|
||||||
GetRootPath() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type IRootId interface {
|
|
||||||
GetRootId() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type RootPath struct {
|
|
||||||
RootFolderPath string `json:"root_folder_path"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type RootID struct {
|
|
||||||
RootFolderID string `json:"root_folder_id" omit:"true"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r RootPath) GetRootPath() string {
|
|
||||||
return r.RootFolderPath
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *RootPath) SetRootPath(path string) {
|
|
||||||
r.RootFolderPath = path
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r RootID) GetRootId() string {
|
|
||||||
return r.RootFolderID
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
package op
|
|
||||||
|
|
||||||
const (
|
|
||||||
WORK = "work"
|
|
||||||
RootName = "root"
|
|
||||||
)
|
|
||||||
@@ -1,173 +0,0 @@
|
|||||||
package op
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/conf"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/driver"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type New func() driver.Driver
|
|
||||||
|
|
||||||
var driverNewMap = map[string]New{}
|
|
||||||
var driverInfoMap = map[string][]driver.Item{} //driver.Info{}
|
|
||||||
|
|
||||||
func RegisterDriver(driver New) {
|
|
||||||
// log.Infof("register driver: [%s]", config.Name)
|
|
||||||
tempDriver := driver()
|
|
||||||
tempConfig := tempDriver.Config()
|
|
||||||
registerDriverItems(tempConfig, tempDriver.GetAddition())
|
|
||||||
driverNewMap[tempConfig.Name] = driver
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetDriverNew(name string) (New, error) {
|
|
||||||
n, ok := driverNewMap[name]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.Errorf("no driver named: %s", name)
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetDriverNames() []string {
|
|
||||||
var driverNames []string
|
|
||||||
for k := range driverInfoMap {
|
|
||||||
driverNames = append(driverNames, k)
|
|
||||||
}
|
|
||||||
return driverNames
|
|
||||||
}
|
|
||||||
|
|
||||||
// func GetDriverInfoMap() map[string]driver.Info {
|
|
||||||
// return driverInfoMap
|
|
||||||
// }
|
|
||||||
func GetDriverInfoMap() map[string][]driver.Item {
|
|
||||||
return driverInfoMap
|
|
||||||
}
|
|
||||||
func registerDriverItems(config driver.Config, addition driver.Additional) {
|
|
||||||
// log.Debugf("addition of %s: %+v", config.Name, addition)
|
|
||||||
tAddition := reflect.TypeOf(addition)
|
|
||||||
for tAddition.Kind() == reflect.Pointer {
|
|
||||||
tAddition = tAddition.Elem()
|
|
||||||
}
|
|
||||||
//mainItems := getMainItems(config)
|
|
||||||
additionalItems := getAdditionalItems(tAddition, config.DefaultRoot)
|
|
||||||
driverInfoMap[config.Name] = additionalItems
|
|
||||||
// driver.Info{
|
|
||||||
// Common: mainItems,
|
|
||||||
// Additional: additionalItems,
|
|
||||||
// Config: config,
|
|
||||||
// }
|
|
||||||
}
|
|
||||||
|
|
||||||
func getMainItems(config driver.Config) []driver.Item {
|
|
||||||
items := []driver.Item{{
|
|
||||||
Name: "mount_path",
|
|
||||||
Type: conf.TypeString,
|
|
||||||
Required: true,
|
|
||||||
Help: "",
|
|
||||||
}, {
|
|
||||||
Name: "order",
|
|
||||||
Type: conf.TypeNumber,
|
|
||||||
Help: "use to sort",
|
|
||||||
}, {
|
|
||||||
Name: "remark",
|
|
||||||
Type: conf.TypeText,
|
|
||||||
}}
|
|
||||||
if !config.NoCache {
|
|
||||||
items = append(items, driver.Item{
|
|
||||||
Name: "cache_expiration",
|
|
||||||
Type: conf.TypeNumber,
|
|
||||||
Default: "30",
|
|
||||||
Required: true,
|
|
||||||
Help: "The cache expiration time for this storage",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
if !config.OnlyProxy && !config.OnlyLocal {
|
|
||||||
items = append(items, []driver.Item{{
|
|
||||||
Name: "web_proxy",
|
|
||||||
Type: conf.TypeBool,
|
|
||||||
}, {
|
|
||||||
Name: "webdav_policy",
|
|
||||||
Type: conf.TypeSelect,
|
|
||||||
Options: "302_redirect,use_proxy_url,native_proxy",
|
|
||||||
Default: "302_redirect",
|
|
||||||
Required: true,
|
|
||||||
},
|
|
||||||
}...)
|
|
||||||
} else {
|
|
||||||
items = append(items, driver.Item{
|
|
||||||
Name: "webdav_policy",
|
|
||||||
Type: conf.TypeSelect,
|
|
||||||
Default: "native_proxy",
|
|
||||||
Options: "use_proxy_url,native_proxy",
|
|
||||||
Required: true,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
items = append(items, driver.Item{
|
|
||||||
Name: "down_proxy_url",
|
|
||||||
Type: conf.TypeText,
|
|
||||||
})
|
|
||||||
if config.LocalSort {
|
|
||||||
items = append(items, []driver.Item{{
|
|
||||||
Name: "order_by",
|
|
||||||
Type: conf.TypeSelect,
|
|
||||||
Options: "name,size,modified",
|
|
||||||
}, {
|
|
||||||
Name: "order_direction",
|
|
||||||
Type: conf.TypeSelect,
|
|
||||||
Options: "asc,desc",
|
|
||||||
}}...)
|
|
||||||
}
|
|
||||||
items = append(items, driver.Item{
|
|
||||||
Name: "extract_folder",
|
|
||||||
Type: conf.TypeSelect,
|
|
||||||
Options: "front,back",
|
|
||||||
})
|
|
||||||
return items
|
|
||||||
}
|
|
||||||
|
|
||||||
func getAdditionalItems(t reflect.Type, defaultRoot string) []driver.Item {
|
|
||||||
var items []driver.Item
|
|
||||||
for i := 0; i < t.NumField(); i++ {
|
|
||||||
|
|
||||||
field := t.Field(i)
|
|
||||||
if field.Type.Kind() == reflect.Struct {
|
|
||||||
items = append(items, getAdditionalItems(field.Type, defaultRoot)...)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
tag := field.Tag
|
|
||||||
ignore, ok1 := tag.Lookup("ignore")
|
|
||||||
name, ok2 := tag.Lookup("json")
|
|
||||||
if (ok1 && ignore == "true") || !ok2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if tag.Get("omit") == "true" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
item := driver.Item{
|
|
||||||
Name: name,
|
|
||||||
Type: strings.ToLower(field.Type.Name()),
|
|
||||||
Default: tag.Get("default"),
|
|
||||||
Options: tag.Get("options"),
|
|
||||||
Required: tag.Get("required") == "true",
|
|
||||||
Help: tag.Get("help"),
|
|
||||||
}
|
|
||||||
if tag.Get("type") != "" {
|
|
||||||
item.Type = tag.Get("type")
|
|
||||||
}
|
|
||||||
if item.Name == "root_folder_id" || item.Name == "root_folder_path" {
|
|
||||||
if item.Default == "" {
|
|
||||||
item.Default = defaultRoot
|
|
||||||
}
|
|
||||||
item.Required = item.Default != ""
|
|
||||||
}
|
|
||||||
// set default type to string
|
|
||||||
if item.Type == "" {
|
|
||||||
item.Type = "string"
|
|
||||||
}
|
|
||||||
items = append(items, item)
|
|
||||||
}
|
|
||||||
return items
|
|
||||||
}
|
|
||||||
@@ -1,545 +0,0 @@
|
|||||||
package op
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"os"
|
|
||||||
stdpath "path"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/internal/driver"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/model"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/generic_sync"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/singleflight"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/utils"
|
|
||||||
"github.com/Xhofe/go-cache"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
pkgerr "github.com/pkg/errors"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// In order to facilitate adding some other things before and after file op
|
|
||||||
|
|
||||||
var listCache = cache.NewMemCache(cache.WithShards[[]model.Obj](64))
|
|
||||||
var listG singleflight.Group[[]model.Obj]
|
|
||||||
|
|
||||||
func updateCacheObj(storage driver.Driver, path string, oldObj model.Obj, newObj model.Obj) {
|
|
||||||
key := Key(storage, path)
|
|
||||||
objs, ok := listCache.Get(key)
|
|
||||||
if ok {
|
|
||||||
for i, obj := range objs {
|
|
||||||
if obj.GetName() == oldObj.GetName() {
|
|
||||||
objs[i] = newObj
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func delCacheObj(storage driver.Driver, path string, obj model.Obj) {
|
|
||||||
key := Key(storage, path)
|
|
||||||
objs, ok := listCache.Get(key)
|
|
||||||
if ok {
|
|
||||||
for i, oldObj := range objs {
|
|
||||||
if oldObj.GetName() == obj.GetName() {
|
|
||||||
objs = append(objs[:i], objs[i+1:]...)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var addSortDebounceMap generic_sync.MapOf[string, func(func())]
|
|
||||||
|
|
||||||
func addCacheObj(storage driver.Driver, path string, newObj model.Obj) {
|
|
||||||
key := Key(storage, path)
|
|
||||||
objs, ok := listCache.Get(key)
|
|
||||||
if ok {
|
|
||||||
for i, obj := range objs {
|
|
||||||
if obj.GetName() == newObj.GetName() {
|
|
||||||
objs[i] = newObj
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Simple separation of files and folders
|
|
||||||
if len(objs) > 0 && objs[len(objs)-1].IsDir() == newObj.IsDir() {
|
|
||||||
objs = append(objs, newObj)
|
|
||||||
} else {
|
|
||||||
objs = append([]model.Obj{newObj}, objs...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if storage.Config().LocalSort {
|
|
||||||
debounce, _ := addSortDebounceMap.LoadOrStore(key, utils.NewDebounce(time.Minute))
|
|
||||||
log.Debug("addCacheObj: wait start sort")
|
|
||||||
debounce(func() {
|
|
||||||
log.Debug("addCacheObj: start sort")
|
|
||||||
model.SortFiles(objs, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
|
|
||||||
addSortDebounceMap.Delete(key)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
listCache.Set(key, objs, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func ClearCache(storage driver.Driver, path string) {
|
|
||||||
listCache.Del(Key(storage, path))
|
|
||||||
}
|
|
||||||
|
|
||||||
func Key(storage driver.Driver, path string) string {
|
|
||||||
return stdpath.Join(storage.GetStorage().MountPath, utils.FixAndCleanPath(path))
|
|
||||||
}
|
|
||||||
|
|
||||||
// List files in storage, not contains virtual file
|
|
||||||
func List(ctx context.Context, storage driver.Driver, path string, args model.ListArgs, refresh ...bool) ([]model.Obj, error) {
|
|
||||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
|
||||||
return nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
|
||||||
}
|
|
||||||
path = utils.FixAndCleanPath(path)
|
|
||||||
log.Debugf("op.List %s", path)
|
|
||||||
key := Key(storage, path)
|
|
||||||
if !utils.IsBool(refresh...) {
|
|
||||||
if files, ok := listCache.Get(key); ok {
|
|
||||||
log.Debugf("use cache when list %s", path)
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dir, err := GetUnwrap(ctx, storage, path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WithMessage(err, "failed get dir")
|
|
||||||
}
|
|
||||||
log.Debugf("list dir: %+v", dir)
|
|
||||||
if !dir.IsDir() {
|
|
||||||
return nil, errors.WithStack(errors.New("not a folder"))
|
|
||||||
}
|
|
||||||
objs, err, _ := listG.Do(key, func() ([]model.Obj, error) {
|
|
||||||
files, err := storage.List(ctx, dir, args)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed to list objs")
|
|
||||||
}
|
|
||||||
// set path
|
|
||||||
for _, f := range files {
|
|
||||||
if s, ok := f.(model.SetPath); ok && f.GetPath() == "" && dir.GetPath() != "" {
|
|
||||||
s.SetPath(stdpath.Join(dir.GetPath(), f.GetName()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// warp obj name
|
|
||||||
model.WrapObjsName(files)
|
|
||||||
// call hooks
|
|
||||||
go func(reqPath string, files []model.Obj) {
|
|
||||||
for _, hook := range ObjsUpdateHooks {
|
|
||||||
hook(args.ReqPath, files)
|
|
||||||
}
|
|
||||||
}(args.ReqPath, files)
|
|
||||||
|
|
||||||
// sort objs
|
|
||||||
if storage.Config().LocalSort {
|
|
||||||
model.SortFiles(files, storage.GetStorage().OrderBy, storage.GetStorage().OrderDirection)
|
|
||||||
}
|
|
||||||
model.ExtractFolder(files, storage.GetStorage().ExtractFolder)
|
|
||||||
|
|
||||||
if !storage.Config().NoCache {
|
|
||||||
if len(files) > 0 {
|
|
||||||
log.Debugf("set cache: %s => %+v", key, files)
|
|
||||||
listCache.Set(key, files, cache.WithEx[[]model.Obj](time.Minute*time.Duration(storage.GetStorage().CacheExpiration)))
|
|
||||||
} else {
|
|
||||||
log.Debugf("del cache: %s", key)
|
|
||||||
listCache.Del(key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
})
|
|
||||||
return objs, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get object from list of files
|
|
||||||
func Get(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
|
|
||||||
path = utils.FixAndCleanPath(path)
|
|
||||||
log.Debugf("op.Get %s", path)
|
|
||||||
|
|
||||||
// is root folder
|
|
||||||
if utils.PathEqual(path, "/") {
|
|
||||||
var rootObj model.Obj
|
|
||||||
switch r := storage.GetAddition().(type) {
|
|
||||||
case driver.IRootId:
|
|
||||||
rootObj = &model.Object{
|
|
||||||
ID: r.GetRootId(),
|
|
||||||
Name: RootName,
|
|
||||||
Size: 0,
|
|
||||||
Modified: storage.GetStorage().Modified,
|
|
||||||
IsFolder: true,
|
|
||||||
Path: path,
|
|
||||||
}
|
|
||||||
case driver.IRootPath:
|
|
||||||
rootObj = &model.Object{
|
|
||||||
Path: r.GetRootPath(),
|
|
||||||
Name: RootName,
|
|
||||||
Size: 0,
|
|
||||||
Modified: storage.GetStorage().Modified,
|
|
||||||
IsFolder: true,
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
if storage, ok := storage.(driver.Getter); ok {
|
|
||||||
obj, err := storage.GetRoot(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WithMessage(err, "failed get root obj")
|
|
||||||
}
|
|
||||||
rootObj = obj
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if rootObj == nil {
|
|
||||||
return nil, errors.Errorf("please implement IRootPath or IRootId or Getter method")
|
|
||||||
}
|
|
||||||
return &model.ObjWrapName{
|
|
||||||
Name: RootName,
|
|
||||||
Obj: rootObj,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// not root folder
|
|
||||||
dir, name := stdpath.Split(path)
|
|
||||||
files, err := List(ctx, storage, dir, model.ListArgs{})
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WithMessage(err, "failed get parent list")
|
|
||||||
}
|
|
||||||
for _, f := range files {
|
|
||||||
// TODO maybe copy obj here
|
|
||||||
if f.GetName() == name {
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Debugf("cant find obj with name: %s", name)
|
|
||||||
return nil, errors.WithStack(errors.New("object not found"))
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetUnwrap(ctx context.Context, storage driver.Driver, path string) (model.Obj, error) {
|
|
||||||
obj, err := Get(ctx, storage, path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return model.UnwrapObjs(obj), err
|
|
||||||
}
|
|
||||||
|
|
||||||
var linkCache = cache.NewMemCache(cache.WithShards[*model.Link](16))
|
|
||||||
var linkG singleflight.Group[*model.Link]
|
|
||||||
|
|
||||||
// Link get link, if is an url. should have an expiry time
|
|
||||||
func Link(ctx context.Context, storage driver.Driver, path string, args model.LinkArgs) (*model.Link, model.Obj, error) {
|
|
||||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
|
||||||
return nil, nil, errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
|
||||||
}
|
|
||||||
file, err := GetUnwrap(ctx, storage, path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, errors.WithMessage(err, "failed to get file")
|
|
||||||
}
|
|
||||||
if file.IsDir() {
|
|
||||||
return nil, nil, errors.WithStack(errors.New("not a file"))
|
|
||||||
}
|
|
||||||
key := Key(storage, path) + ":" + args.IP
|
|
||||||
if link, ok := linkCache.Get(key); ok {
|
|
||||||
return link, file, nil
|
|
||||||
}
|
|
||||||
fn := func() (*model.Link, error) {
|
|
||||||
link, err := storage.Link(ctx, file, args)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed get link")
|
|
||||||
}
|
|
||||||
if link.Expiration != nil {
|
|
||||||
linkCache.Set(key, link, cache.WithEx[*model.Link](*link.Expiration))
|
|
||||||
}
|
|
||||||
return link, nil
|
|
||||||
}
|
|
||||||
link, err, _ := linkG.Do(key, fn)
|
|
||||||
return link, file, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Other api
|
|
||||||
func Other(ctx context.Context, storage driver.Driver, args model.FsOtherArgs) (interface{}, error) {
|
|
||||||
obj, err := GetUnwrap(ctx, storage, args.Path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WithMessagef(err, "failed to get obj")
|
|
||||||
}
|
|
||||||
if o, ok := storage.(driver.Other); ok {
|
|
||||||
return o.Other(ctx, model.OtherArgs{
|
|
||||||
Obj: obj,
|
|
||||||
Method: args.Method,
|
|
||||||
Data: args.Data,
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
return nil, errors.New("not implement")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var mkdirG singleflight.Group[interface{}]
|
|
||||||
|
|
||||||
func MakeDir(ctx context.Context, storage driver.Driver, path string, lazyCache ...bool) error {
|
|
||||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
|
||||||
return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
|
||||||
}
|
|
||||||
path = utils.FixAndCleanPath(path)
|
|
||||||
key := Key(storage, path)
|
|
||||||
_, err, _ := mkdirG.Do(key, func() (interface{}, error) {
|
|
||||||
// check if dir exists
|
|
||||||
f, err := GetUnwrap(ctx, storage, path)
|
|
||||||
if err != nil {
|
|
||||||
if errors.Is(pkgerr.Cause(err), errors.New("object not found")) {
|
|
||||||
parentPath, dirName := stdpath.Split(path)
|
|
||||||
err = MakeDir(ctx, storage, parentPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WithMessagef(err, "failed to make parent dir [%s]", parentPath)
|
|
||||||
}
|
|
||||||
parentDir, err := GetUnwrap(ctx, storage, parentPath)
|
|
||||||
// this should not happen
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.WithMessagef(err, "failed to get parent dir [%s]", parentPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch s := storage.(type) {
|
|
||||||
case driver.MkdirResult:
|
|
||||||
var newObj model.Obj
|
|
||||||
newObj, err = s.MakeDir(ctx, parentDir, dirName)
|
|
||||||
if err == nil {
|
|
||||||
if newObj != nil {
|
|
||||||
addCacheObj(storage, parentPath, model.WrapObjName(newObj))
|
|
||||||
} else if !utils.IsBool(lazyCache...) {
|
|
||||||
ClearCache(storage, parentPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case driver.Mkdir:
|
|
||||||
err = s.MakeDir(ctx, parentDir, dirName)
|
|
||||||
if err == nil && !utils.IsBool(lazyCache...) {
|
|
||||||
ClearCache(storage, parentPath)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, errors.New("not implement")
|
|
||||||
}
|
|
||||||
return nil, errors.WithStack(err)
|
|
||||||
}
|
|
||||||
return nil, errors.WithMessage(err, "failed to check if dir exists")
|
|
||||||
}
|
|
||||||
// dir exists
|
|
||||||
if f.IsDir() {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
// dir to make is a file
|
|
||||||
return nil, errors.New("file exists")
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func Move(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
|
|
||||||
if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
|
|
||||||
return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
|
|
||||||
}
|
|
||||||
srcPath = utils.FixAndCleanPath(srcPath)
|
|
||||||
dstDirPath = utils.FixAndCleanPath(dstDirPath)
|
|
||||||
srcRawObj, err := Get(ctx, storage, srcPath)
|
|
||||||
if err != nil {
|
|
||||||
return errors.WithMessage(err, "failed to get src object")
|
|
||||||
}
|
|
||||||
srcObj := model.UnwrapObjs(srcRawObj)
|
|
||||||
    dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
    if err != nil {
        return errors.WithMessage(err, "failed to get dst dir")
    }
    srcDirPath := stdpath.Dir(srcPath)

    switch s := storage.(type) {
    case driver.MoveResult:
        var newObj model.Obj
        newObj, err = s.Move(ctx, srcObj, dstDir)
        if err == nil {
            delCacheObj(storage, srcDirPath, srcRawObj)
            if newObj != nil {
                addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
            } else if !utils.IsBool(lazyCache...) {
                ClearCache(storage, dstDirPath)
            }
        }
    case driver.Move:
        err = s.Move(ctx, srcObj, dstDir)
        if err == nil {
            delCacheObj(storage, srcDirPath, srcRawObj)
            if !utils.IsBool(lazyCache...) {
                ClearCache(storage, dstDirPath)
            }
        }
    default:
        return errors.New("not implement")
    }
    return errors.WithStack(err)
}

func Rename(ctx context.Context, storage driver.Driver, srcPath, dstName string, lazyCache ...bool) error {
    if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
        return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
    }
    srcPath = utils.FixAndCleanPath(srcPath)
    srcRawObj, err := Get(ctx, storage, srcPath)
    if err != nil {
        return errors.WithMessage(err, "failed to get src object")
    }
    srcObj := model.UnwrapObjs(srcRawObj)
    srcDirPath := stdpath.Dir(srcPath)

    switch s := storage.(type) {
    case driver.RenameResult:
        var newObj model.Obj
        newObj, err = s.Rename(ctx, srcObj, dstName)
        if err == nil {
            if newObj != nil {
                updateCacheObj(storage, srcDirPath, srcRawObj, model.WrapObjName(newObj))
            } else if !utils.IsBool(lazyCache...) {
                ClearCache(storage, srcDirPath)
            }
        }
    case driver.Rename:
        err = s.Rename(ctx, srcObj, dstName)
        if err == nil && !utils.IsBool(lazyCache...) {
            ClearCache(storage, srcDirPath)
        }
    default:
        return errors.New("not implement")
    }
    return errors.WithStack(err)
}

// Copy Just copy file[s] in a storage
func Copy(ctx context.Context, storage driver.Driver, srcPath, dstDirPath string, lazyCache ...bool) error {
    if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
        return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
    }
    srcPath = utils.FixAndCleanPath(srcPath)
    dstDirPath = utils.FixAndCleanPath(dstDirPath)
    srcObj, err := GetUnwrap(ctx, storage, srcPath)
    if err != nil {
        return errors.WithMessage(err, "failed to get src object")
    }
    dstDir, err := GetUnwrap(ctx, storage, dstDirPath)
    if err != nil {
        return errors.WithMessage(err, "failed to get dst dir")
    }

    switch s := storage.(type) {
    case driver.CopyResult:
        var newObj model.Obj
        newObj, err = s.Copy(ctx, srcObj, dstDir)
        if err == nil {
            if newObj != nil {
                addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
            } else if !utils.IsBool(lazyCache...) {
                ClearCache(storage, dstDirPath)
            }
        }
    case driver.Copy:
        err = s.Copy(ctx, srcObj, dstDir)
        if err == nil && !utils.IsBool(lazyCache...) {
            ClearCache(storage, dstDirPath)
        }
    default:
        return errors.New("not implement")
    }
    return errors.WithStack(err)
}

func Remove(ctx context.Context, storage driver.Driver, path string) error {
    if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
        return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
    }
    path = utils.FixAndCleanPath(path)
    rawObj, err := Get(ctx, storage, path)
    if err != nil {
        // if object not found, it's ok
        if errors.Is(pkgerr.Cause(err), errors.New("object not found")) {
            return nil
        }
        return errors.WithMessage(err, "failed to get object")
    }
    dirPath := stdpath.Dir(path)

    switch s := storage.(type) {
    case driver.Remove:
        err = s.Remove(ctx, model.UnwrapObjs(rawObj))
        if err == nil {
            delCacheObj(storage, dirPath, rawObj)
        }
    default:
        return errors.New("not implement")
    }
    return errors.WithStack(err)
}

func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file *model.FileStream, up driver.UpdateProgress, lazyCache ...bool) error {
    if storage.Config().CheckStatus && storage.GetStorage().Status != WORK {
        return errors.Errorf("storage not init: %s", storage.GetStorage().Status)
    }
    defer func() {
        if f, ok := file.GetReadCloser().(*os.File); ok {
            err := os.RemoveAll(f.Name())
            if err != nil {
                log.Errorf("failed to remove file [%s]", f.Name())
            }
        }
    }()
    defer func() {
        if err := file.Close(); err != nil {
            log.Errorf("failed to close file streamer, %v", err)
        }
    }()
    // if file exist and size = 0, delete it
    dstDirPath = utils.FixAndCleanPath(dstDirPath)
    dstPath := stdpath.Join(dstDirPath, file.GetName())
    fi, err := GetUnwrap(ctx, storage, dstPath)
    if err == nil {
        if fi.GetSize() == 0 {
            err = Remove(ctx, storage, dstPath)
            if err != nil {
                return errors.WithMessagef(err, "failed remove file that exist and have size 0")
            }
        } else {
            file.Old = fi
        }
    }
    err = MakeDir(ctx, storage, dstDirPath)
    if err != nil {
        return errors.WithMessagef(err, "failed to make dir [%s]", dstDirPath)
    }
    parentDir, err := GetUnwrap(ctx, storage, dstDirPath)
    // this should not happen
    if err != nil {
        return errors.WithMessagef(err, "failed to get dir [%s]", dstDirPath)
    }
    // if up is nil, set a default to prevent panic
    if up == nil {
        up = func(p int) {}
    }

    switch s := storage.(type) {
    case driver.PutResult:
        var newObj model.Obj
        newObj, err = s.Put(ctx, parentDir, file, up)
        if err == nil {
            if newObj != nil {
                addCacheObj(storage, dstDirPath, model.WrapObjName(newObj))
            } else if !utils.IsBool(lazyCache...) {
                ClearCache(storage, dstDirPath)
            }
        }
    case driver.Put:
        err = s.Put(ctx, parentDir, file, up)
        if err == nil && !utils.IsBool(lazyCache...) {
            ClearCache(storage, dstDirPath)
        }
    default:
        return errors.New("not implement")
    }
    log.Debugf("put file [%s] done", file.GetName())
    //if err == nil {
    //	//clear cache
    //	key := stdpath.Join(storage.GetStorage().MountPath, dstDirPath)
    //	listCache.Del(key)
    //}
    return errors.WithStack(err)
}
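Note: each of these op-layer functions dispatches on optional driver capabilities via a type switch, preferring a `*Result` variant (e.g. `driver.CopyResult`) that returns the new object so the directory cache can be updated, and otherwise falling back to the plain capability and clearing the destination cache unless `lazyCache` is set. A minimal, self-contained sketch of that dispatch pattern follows; the interface names and method sets below are illustrative stand-ins inferred from the calls above, not the real `internal/driver` definitions.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Obj is a stand-in for model.Obj; only what the sketch needs.
type Obj interface{ GetName() string }

// Copier / CopierResult mirror the "plain vs. *Result" capability split.
type Copier interface {
	Copy(ctx context.Context, src, dstDir Obj) error
}

type CopierResult interface {
	Copy(ctx context.Context, src, dstDir Obj) (Obj, error)
}

type file struct{ name string }

func (f file) GetName() string { return f.name }

// copyDispatch prefers the result-returning capability and rejects
// storages that implement neither, like the functions above.
func copyDispatch(ctx context.Context, storage interface{}, src, dstDir Obj) (Obj, error) {
	switch s := storage.(type) {
	case CopierResult:
		return s.Copy(ctx, src, dstDir)
	case Copier:
		return nil, s.Copy(ctx, src, dstDir)
	default:
		return nil, errors.New("not implement")
	}
}

// demoDriver only implements the plain capability.
type demoDriver struct{}

func (demoDriver) Copy(ctx context.Context, src, dstDir Obj) error {
	fmt.Printf("copy %s into %s\n", src.GetName(), dstDir.GetName())
	return nil
}

func main() {
	_, err := copyDispatch(context.Background(), demoDriver{}, file{"a.txt"}, file{"Documents"})
	fmt.Println("err:", err)
}
```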
@@ -1,109 +0,0 @@
package op

import (
    "regexp"
    "strings"

    "github.com/IceWhaleTech/CasaOS-Common/utils/logger"
    "github.com/IceWhaleTech/CasaOS/internal/conf"
    "github.com/IceWhaleTech/CasaOS/internal/driver"
    "github.com/IceWhaleTech/CasaOS/model"
    jsoniter "github.com/json-iterator/go"
    "github.com/pkg/errors"
    "go.uber.org/zap"
)

// Obj
type ObjsUpdateHook = func(parent string, objs []model.Obj)

var (
    ObjsUpdateHooks = make([]ObjsUpdateHook, 0)
)

func RegisterObjsUpdateHook(hook ObjsUpdateHook) {
    ObjsUpdateHooks = append(ObjsUpdateHooks, hook)
}

func HandleObjsUpdateHook(parent string, objs []model.Obj) {
    for _, hook := range ObjsUpdateHooks {
        hook(parent, objs)
    }
}

// Setting
type SettingItemHook func(item *model.SettingItem) error

var settingItemHooks = map[string]SettingItemHook{
    conf.VideoTypes: func(item *model.SettingItem) error {
        conf.SlicesMap[conf.VideoTypes] = strings.Split(item.Value, ",")
        return nil
    },
    conf.AudioTypes: func(item *model.SettingItem) error {
        conf.SlicesMap[conf.AudioTypes] = strings.Split(item.Value, ",")
        return nil
    },
    conf.ImageTypes: func(item *model.SettingItem) error {
        conf.SlicesMap[conf.ImageTypes] = strings.Split(item.Value, ",")
        return nil
    },
    conf.TextTypes: func(item *model.SettingItem) error {
        conf.SlicesMap[conf.TextTypes] = strings.Split(item.Value, ",")
        return nil
    },
    conf.ProxyTypes: func(item *model.SettingItem) error {
        conf.SlicesMap[conf.ProxyTypes] = strings.Split(item.Value, ",")
        return nil
    },
    conf.ProxyIgnoreHeaders: func(item *model.SettingItem) error {
        conf.SlicesMap[conf.ProxyIgnoreHeaders] = strings.Split(item.Value, ",")
        return nil
    },
    conf.PrivacyRegs: func(item *model.SettingItem) error {
        regStrs := strings.Split(item.Value, "\n")
        regs := make([]*regexp.Regexp, 0, len(regStrs))
        for _, regStr := range regStrs {
            reg, err := regexp.Compile(regStr)
            if err != nil {
                return errors.WithStack(err)
            }
            regs = append(regs, reg)
        }
        conf.PrivacyReg = regs
        return nil
    },
    conf.FilenameCharMapping: func(item *model.SettingItem) error {
        var json = jsoniter.ConfigCompatibleWithStandardLibrary
        err := json.UnmarshalFromString(item.Value, &conf.FilenameCharMap)
        if err != nil {
            return err
        }
        logger.Info("filename char mapping", zap.Any("FilenameCharMap", conf.FilenameCharMap))
        return nil
    },
}

func RegisterSettingItemHook(key string, hook SettingItemHook) {
    settingItemHooks[key] = hook
}

func HandleSettingItemHook(item *model.SettingItem) (hasHook bool, err error) {
    if hook, ok := settingItemHooks[item.Key]; ok {
        return true, hook(item)
    }
    return false, nil
}

// Storage
type StorageHook func(typ string, storage driver.Driver)

var storageHooks = make([]StorageHook, 0)

func CallStorageHooks(typ string, storage driver.Driver) {
    for _, hook := range storageHooks {
        hook(typ, storage)
    }
}

func RegisterStorageHook(hook StorageHook) {
    storageHooks = append(storageHooks, hook)
}
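Note: the removed hook file keeps three independent registries (object-update hooks, setting-item hooks keyed by setting key, and storage hooks). A small usage sketch of the setting-item registry, assuming the v0.4.2 side of this compare where `internal/op` and `model.SettingItem` still exist; the `"custom_key"` key and the trimming behaviour are hypothetical.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/IceWhaleTech/CasaOS/internal/op"
	"github.com/IceWhaleTech/CasaOS/model"
)

func main() {
	// Register a hook for a hypothetical setting key; it normalizes the
	// value whenever that setting item is handled.
	op.RegisterSettingItemHook("custom_key", func(item *model.SettingItem) error {
		item.Value = strings.TrimSpace(item.Value)
		return nil
	})

	item := &model.SettingItem{Key: "custom_key", Value: "  hello  "}
	hasHook, err := op.HandleSettingItemHook(item)
	fmt.Println(hasHook, err, item.Value) // true <nil> hello
}
```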
@@ -1,36 +0,0 @@
package sign

import (
    "sync"
    "time"

    "github.com/IceWhaleTech/CasaOS/pkg/sign"
)

var once sync.Once
var instance sign.Sign

func Sign(data string) string {
    return NotExpired(data)
}

func WithDuration(data string, d time.Duration) string {
    once.Do(Instance)
    return instance.Sign(data, time.Now().Add(d).Unix())
}

func NotExpired(data string) string {
    once.Do(Instance)
    return instance.Sign(data, 0)
}

func Verify(data string, sign string) error {
    once.Do(Instance)
    return instance.Verify(data, sign)
}

func Instance() {
    instance = sign.NewHMACSign([]byte("token"))
}
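Note: this removed wrapper lazily builds a single HMAC signer (`sign.NewHMACSign([]byte("token"))`, with a hard-coded key) and exposes non-expiring and time-limited signatures plus verification. A short usage sketch; the wrapper's import path (`internal/sign`) and the signed payload below are assumptions, since the file path is not shown in this hunk.

```go
package main

import (
	"fmt"
	"time"

	sign "github.com/IceWhaleTech/CasaOS/internal/sign" // assumed path of the wrapper package
)

func main() {
	data := "/v3/file?path=/DATA/demo.txt" // hypothetical payload

	// Non-expiring signature vs. one that is only meant to be valid for an hour.
	s1 := sign.Sign(data)
	s2 := sign.WithDuration(data, time.Hour)

	fmt.Println(s1, s2)
	fmt.Println("verify:", sign.Verify(data, s1)) // nil on success
}
```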
139 main.go
@@ -1,10 +1,6 @@
|
|||||||
//go:generate bash -c "mkdir -p codegen && go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen@v1.12.4 -generate types,server,spec -package codegen api/casaos/openapi.yaml > codegen/casaos_api.go"
|
|
||||||
//go:generate bash -c "mkdir -p codegen/message_bus && go run github.com/deepmap/oapi-codegen/cmd/oapi-codegen@v1.12.4 -generate types,client -package message_bus https://raw.githubusercontent.com/IceWhaleTech/CasaOS-MessageBus/main/api/message_bus/openapi.yaml > codegen/message_bus/api.go"
|
|
||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
_ "embed"
|
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
@@ -12,25 +8,19 @@ import (
|
|||||||
"path/filepath"
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/model"
|
"github.com/IceWhaleTech/CasaOS-Gateway/common"
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/constants"
|
"github.com/IceWhaleTech/CasaOS/model/notify"
|
||||||
"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
|
|
||||||
|
|
||||||
util_http "github.com/IceWhaleTech/CasaOS-Common/utils/http"
|
|
||||||
|
|
||||||
"github.com/IceWhaleTech/CasaOS/codegen/message_bus"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/common"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/cache"
|
"github.com/IceWhaleTech/CasaOS/pkg/cache"
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/config"
|
"github.com/IceWhaleTech/CasaOS/pkg/config"
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/sqlite"
|
"github.com/IceWhaleTech/CasaOS/pkg/sqlite"
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/utils/command"
|
|
||||||
"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
|
"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/pkg/utils/loger"
|
||||||
"github.com/IceWhaleTech/CasaOS/route"
|
"github.com/IceWhaleTech/CasaOS/route"
|
||||||
"github.com/IceWhaleTech/CasaOS/service"
|
"github.com/IceWhaleTech/CasaOS/service"
|
||||||
|
"github.com/IceWhaleTech/CasaOS/types"
|
||||||
"github.com/coreos/go-systemd/daemon"
|
"github.com/coreos/go-systemd/daemon"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
|
|
||||||
_ "github.com/IceWhaleTech/CasaOS/drivers"
|
|
||||||
"github.com/robfig/cron"
|
"github.com/robfig/cron"
|
||||||
"gorm.io/gorm"
|
"gorm.io/gorm"
|
||||||
)
|
)
|
||||||
@@ -40,15 +30,6 @@ const LOCALHOST = "127.0.0.1"
|
|||||||
var sqliteDB *gorm.DB
|
var sqliteDB *gorm.DB
|
||||||
|
|
||||||
var (
|
var (
|
||||||
commit = "private build"
|
|
||||||
date = "private build"
|
|
||||||
|
|
||||||
//go:embed api/index.html
|
|
||||||
_docHTML string
|
|
||||||
|
|
||||||
//go:embed api/casaos/openapi.yaml
|
|
||||||
_docYAML string
|
|
||||||
|
|
||||||
configFlag = flag.String("c", "", "config address")
|
configFlag = flag.String("c", "", "config address")
|
||||||
dbFlag = flag.String("db", "", "db path")
|
dbFlag = flag.String("db", "", "db path")
|
||||||
versionFlag = flag.Bool("v", false, "version")
|
versionFlag = flag.Bool("v", false, "version")
|
||||||
@@ -57,16 +38,12 @@ var (
|
|||||||
func init() {
|
func init() {
|
||||||
flag.Parse()
|
flag.Parse()
|
||||||
if *versionFlag {
|
if *versionFlag {
|
||||||
fmt.Println("v" + common.VERSION)
|
fmt.Println("v" + types.CURRENTVERSION)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
println("git commit:", commit)
|
|
||||||
println("build date:", date)
|
|
||||||
|
|
||||||
config.InitSetup(*configFlag)
|
config.InitSetup(*configFlag)
|
||||||
|
|
||||||
logger.LogInit(config.AppInfo.LogPath, config.AppInfo.LogSaveName, config.AppInfo.LogFileExt)
|
loger.LogInit()
|
||||||
if len(*dbFlag) == 0 {
|
if len(*dbFlag) == 0 {
|
||||||
*dbFlag = config.AppInfo.DBPath + "/db"
|
*dbFlag = config.AppInfo.DBPath + "/db"
|
||||||
}
|
}
|
||||||
@@ -78,9 +55,13 @@ func init() {
|
|||||||
|
|
||||||
service.Cache = cache.Init()
|
service.Cache = cache.Init()
|
||||||
|
|
||||||
service.GetCPUThermalZone()
|
service.GetToken()
|
||||||
service.MyService.Storages().InitStorages()
|
|
||||||
|
service.NewVersionApp = make(map[string]string)
|
||||||
route.InitFunction()
|
route.InitFunction()
|
||||||
|
|
||||||
|
// go service.LoopFriend()
|
||||||
|
// go service.MyService.App().CheckNewImage()
|
||||||
}
|
}
|
||||||
|
|
||||||
// @title casaOS API
|
// @title casaOS API
|
||||||
@@ -95,31 +76,28 @@ func init() {
|
|||||||
// @name Authorization
|
// @name Authorization
|
||||||
// @BasePath /v1
|
// @BasePath /v1
|
||||||
func main() {
|
func main() {
|
||||||
|
service.NotifyMsg = make(chan notify.Message, 10)
|
||||||
if *versionFlag {
|
if *versionFlag {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
go route.SocketInit(service.NotifyMsg)
|
||||||
|
// model.Setup()
|
||||||
|
// gredis.Setup()
|
||||||
|
|
||||||
v1Router := route.InitV1Router()
|
r := route.InitRouter()
|
||||||
|
// service.SyncTask(sqliteDB)
|
||||||
v2Router := route.InitV2Router()
|
|
||||||
v2DocRouter := route.InitV2DocRouter(_docHTML, _docYAML)
|
|
||||||
v3file := route.InitFile()
|
|
||||||
v4dir := route.InitDir()
|
|
||||||
mux := &util_http.HandlerMultiplexer{
|
|
||||||
HandlerMap: map[string]http.Handler{
|
|
||||||
"v1": v1Router,
|
|
||||||
"v2": v2Router,
|
|
||||||
"doc": v2DocRouter,
|
|
||||||
"v3": v3file,
|
|
||||||
"v4": v4dir,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
cron2 := cron.New()
|
cron2 := cron.New()
|
||||||
// every day execution
|
// every day execution
|
||||||
|
|
||||||
err := cron2.AddFunc("0/5 * * * * *", func() {
|
err := cron2.AddFunc("0/5 * * * * *", func() {
|
||||||
route.SendAllHardwareStatusBySocket()
|
if service.ClientCount > 0 {
|
||||||
|
// route.SendNetINfoBySocket()
|
||||||
|
// route.SendCPUBySocket()
|
||||||
|
// route.SendMemBySocket()
|
||||||
|
// route.SendDiskBySocket()
|
||||||
|
// route.SendUSBBySocket()
|
||||||
|
route.SendAllHardwareStatusBySocket()
|
||||||
|
}
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println(err)
|
fmt.Println(err)
|
||||||
@@ -132,25 +110,10 @@ func main() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
routers := []string{
|
routers := []string{"sys", "apps", "container", "app-categories", "port", "file", "folder", "batch", "image", "samba", "notify"}
|
||||||
"/v1/sys",
|
for _, v := range routers {
|
||||||
"/v1/port",
|
err = service.MyService.Gateway().CreateRoute(&common.Route{
|
||||||
"/v1/file",
|
Path: "/v1/" + v,
|
||||||
"/v1/folder",
|
|
||||||
"/v1/batch",
|
|
||||||
"/v1/image",
|
|
||||||
"/v1/samba",
|
|
||||||
"/v1/notify",
|
|
||||||
"/v1/driver",
|
|
||||||
"/v1/cloud",
|
|
||||||
"/v1/recover",
|
|
||||||
route.V2APIPath,
|
|
||||||
route.V2DocPath,
|
|
||||||
route.V3FilePath,
|
|
||||||
}
|
|
||||||
for _, apiPath := range routers {
|
|
||||||
err = service.MyService.Gateway().CreateRoute(&model.Route{
|
|
||||||
Path: apiPath,
|
|
||||||
Target: "http://" + listener.Addr().String(),
|
Target: "http://" + listener.Addr().String(),
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -159,26 +122,11 @@ func main() {
|
|||||||
panic(err)
|
panic(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
var events []message_bus.EventType
|
|
||||||
events = append(events, message_bus.EventType{Name: "casaos:system:utilization", SourceID: common.SERVICENAME, PropertyTypeList: []message_bus.PropertyType{}})
|
|
||||||
events = append(events, message_bus.EventType{Name: "casaos:file:recover", SourceID: common.SERVICENAME, PropertyTypeList: []message_bus.PropertyType{}})
|
|
||||||
events = append(events, message_bus.EventType{Name: "casaos:file:operate", SourceID: common.SERVICENAME, PropertyTypeList: []message_bus.PropertyType{}})
|
|
||||||
// register at message bus
|
|
||||||
|
|
||||||
response, err := service.MyService.MessageBus().RegisterEventTypesWithResponse(context.Background(), events)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("error when trying to register one or more event types - some event type will not be discoverable", zap.Error(err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if response != nil && response.StatusCode() != http.StatusOK {
|
|
||||||
logger.Error("error when trying to register one or more event types - some event type will not be discoverable", zap.String("status", response.Status()), zap.String("body", string(response.Body)))
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
time.Sleep(time.Second * 2)
|
time.Sleep(time.Second * 2)
|
||||||
// v0.3.6
|
// v0.3.6
|
||||||
if config.ServerInfo.HttpPort != "" {
|
if config.ServerInfo.HttpPort != "" {
|
||||||
changePort := model.ChangePortRequest{}
|
changePort := common.ChangePortRequest{}
|
||||||
changePort.Port = config.ServerInfo.HttpPort
|
changePort.Port = config.ServerInfo.HttpPort
|
||||||
err := service.MyService.Gateway().ChangePort(&changePort)
|
err := service.MyService.Gateway().ChangePort(&changePort)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
@@ -189,38 +137,27 @@ func main() {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
urlFilePath := filepath.Join(config.CommonInfo.RuntimePath, "casaos.url")
|
urlFilePath := filepath.Join(config.CommonInfo.RuntimePath, "casaos.url")
|
||||||
if err := file.CreateFileAndWriteContent(urlFilePath, "http://"+listener.Addr().String()); err != nil {
|
err = file.CreateFileAndWriteContent(urlFilePath, "http://"+listener.Addr().String())
|
||||||
logger.Error("error when creating address file", zap.Error(err),
|
if err != nil {
|
||||||
|
loger.Error("Management service is listening...",
|
||||||
zap.Any("address", listener.Addr().String()),
|
zap.Any("address", listener.Addr().String()),
|
||||||
zap.Any("filepath", urlFilePath),
|
zap.Any("filepath", urlFilePath),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// run any script that needs to be executed
|
|
||||||
scriptDirectory := filepath.Join(constants.DefaultConfigPath, "start.d")
|
|
||||||
command.ExecuteScripts(scriptDirectory)
|
|
||||||
|
|
||||||
if supported, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil {
|
if supported, err := daemon.SdNotify(false, daemon.SdNotifyReady); err != nil {
|
||||||
logger.Error("Failed to notify systemd that casaos main service is ready", zap.Any("error", err))
|
loger.Error("Failed to notify systemd that casaos main service is ready", zap.Any("error", err))
|
||||||
} else if supported {
|
} else if supported {
|
||||||
logger.Info("Notified systemd that casaos main service is ready")
|
loger.Info("Notified systemd that casaos main service is ready")
|
||||||
} else {
|
} else {
|
||||||
logger.Info("This process is not running as a systemd service.")
|
loger.Info("This process is not running as a systemd service.")
|
||||||
}
|
}
|
||||||
// http.HandleFunc("/v1/file/test", func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
|
|
||||||
// //http.ServeFile(w, r, r.URL.Path[1:])
|
|
||||||
// http.ServeFile(w, r, "/DATA/test.img")
|
|
||||||
// })
|
|
||||||
// go http.ListenAndServe(":8081", nil)
|
|
||||||
|
|
||||||
s := &http.Server{
|
s := &http.Server{
|
||||||
Handler: mux,
|
Handler: r,
|
||||||
ReadHeaderTimeout: 5 * time.Second, // fix G112: Potential slowloris attack (see https://github.com/securego/gosec)
|
ReadHeaderTimeout: 5 * time.Second, // fix G112: Potential slowloris attack (see https://github.com/securego/gosec)
|
||||||
}
|
}
|
||||||
|
|
||||||
logger.Info("CasaOS main service is listening...", zap.Any("address", listener.Addr().String()))
|
|
||||||
|
|
||||||
err = s.Serve(listener) // not using http.serve() to fix G114: Use of net/http serve function that has no support for setting timeouts (see https://github.com/securego/gosec)
|
err = s.Serve(listener) // not using http.serve() to fix G114: Use of net/http serve function that has no support for setting timeouts (see https://github.com/securego/gosec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
|
|||||||
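Note: on the v0.4.2 side of this main.go diff, the versioned routers (v1, v2, doc, v3 file, v4 dir) are mounted in a `util_http.HandlerMultiplexer` keyed by the first path segment, and each prefix is then published to the gateway. A minimal stdlib-only sketch of that first-segment dispatch idea; the real `HandlerMultiplexer` lives in CasaOS-Common and may behave differently.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// prefixMux dispatches on the first path segment ("v1", "v2", "doc", ...),
// mirroring the HandlerMap idea used in main.go.
type prefixMux struct {
	handlers map[string]http.Handler
}

func (m *prefixMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	seg := strings.SplitN(strings.TrimPrefix(r.URL.Path, "/"), "/", 2)[0]
	if h, ok := m.handlers[seg]; ok {
		h.ServeHTTP(w, r)
		return
	}
	http.NotFound(w, r)
}

func main() {
	mux := &prefixMux{handlers: map[string]http.Handler{
		"v1": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "v1 API") }),
		"v2": http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "v2 API") }),
	}}
	// ReadHeaderTimeout matches the G112 (slowloris) mitigation used in main.go.
	s := &http.Server{Addr: "127.0.0.1:8080", Handler: mux, ReadHeaderTimeout: 5 * time.Second}
	_ = s.ListenAndServe() // blocks; error ignored in this sketch
}
```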
128 model/app.go Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"encoding/json"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ServerAppListCollection struct {
|
||||||
|
List []ServerAppList `json:"list"`
|
||||||
|
Recommend []ServerAppList `json:"recommend"`
|
||||||
|
Community []ServerAppList `json:"community"`
|
||||||
|
Version string `json:"version"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// @tiger - For data structures used as API output, static information (e.g. title) and
// dynamic information (e.g. state, query_count) should be split into separate structures.
//
// The benefits:
// 1 - When dynamic information is fetched repeatedly, the output stays simpler, since static information only needs to be fetched once.
// 2 - It lowers maintenance cost in future iterations (keeping every field flattened at one level is somewhat costly to maintain).
//
// In addition, driver-specific fields, e.g. the Docker-related ones, could be stored in a map.
// That way, adding polymorphic app types (e.g. Snap) later does not require maintaining multiple structures, or one structure carrying unnecessary fields.
|
||||||
|
type ServerAppList struct {
|
||||||
|
Id uint `gorm:"column:id;primary_key" json:"id"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
Tagline string `json:"tagline"`
|
||||||
|
Tags Strings `gorm:"type:json" json:"tags"`
|
||||||
|
Icon string `json:"icon"`
|
||||||
|
ScreenshotLink Strings `gorm:"type:json" json:"screenshot_link"`
|
||||||
|
Category string `json:"category"`
|
||||||
|
CategoryId int `json:"category_id"`
|
||||||
|
CategoryFont string `json:"category_font"`
|
||||||
|
PortMap string `json:"port_map"`
|
||||||
|
ImageVersion string `json:"image_version"`
|
||||||
|
Tip string `json:"tip"`
|
||||||
|
Envs EnvArray `json:"envs"`
|
||||||
|
Ports PortArray `json:"ports"`
|
||||||
|
Volumes PathArray `json:"volumes"`
|
||||||
|
Devices PathArray `json:"devices"`
|
||||||
|
NetworkModel string `json:"network_model"`
|
||||||
|
Image string `json:"image"`
|
||||||
|
Index string `json:"index"`
|
||||||
|
CreatedAt time.Time `json:"created_at"`
|
||||||
|
UpdatedAt time.Time `json:"updated_at"`
|
||||||
|
State int `json:"state"`
|
||||||
|
Author string `json:"author"`
|
||||||
|
MinMemory int `json:"min_memory"`
|
||||||
|
MinDisk int `json:"min_disk"`
|
||||||
|
MaxMemory uint64 `json:"max_memory"`
|
||||||
|
Thumbnail string `json:"thumbnail"`
|
||||||
|
Healthy string `json:"healthy"`
|
||||||
|
Plugins Strings `json:"plugins"`
|
||||||
|
Origin string `json:"origin"`
|
||||||
|
Type int `json:"type"`
|
||||||
|
QueryCount int `json:"query_count"`
|
||||||
|
Developer string `json:"developer"`
|
||||||
|
HostName string `json:"host_name"`
|
||||||
|
Privileged bool `json:"privileged"`
|
||||||
|
CapAdd Strings `json:"cap_add"`
|
||||||
|
Cmd Strings `json:"cmd"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type Ports struct {
|
||||||
|
ContainerPort uint `json:"container_port"`
|
||||||
|
CommendPort int `json:"commend_port"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
Type int `json:"type"` // 1: required  2: optional  3: default value, no need to display  4: handled by the system  5: the container value is also editable
|
||||||
|
}
|
||||||
|
|
||||||
|
type Volume struct {
|
||||||
|
ContainerPath string `json:"container_path"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
Type int `json:"type"` // 1: required  2: optional  3: default value, no need to display  4: handled by the system  5: the container value is also editable
|
||||||
|
}
|
||||||
|
|
||||||
|
type Envs struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
Value string `json:"value"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
Type int `json:"type"` // 1: required  2: optional  3: default value, no need to display  4: handled by the system  5: the container value is also editable
|
||||||
|
}
|
||||||
|
|
||||||
|
type Devices struct {
|
||||||
|
ContainerPath string `json:"container_path"`
|
||||||
|
Path string `json:"path"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
Type int `json:"type"` // 1: required  2: optional  3: default value, no need to display  4: handled by the system  5: the container value is also editable
|
||||||
|
}
|
||||||
|
|
||||||
|
type configures struct {
|
||||||
|
TcpPorts []Ports `json:"tcp_ports"`
|
||||||
|
UdpPorts []Ports `json:"udp_ports"`
|
||||||
|
Envs []Envs `json:"envs"`
|
||||||
|
Volumes []Volume `json:"volumes"`
|
||||||
|
Devices []Devices `json:"devices"`
|
||||||
|
}
|
||||||
|
|
||||||
|
/**************** Make gorm support the []string type ****************/
|
||||||
|
type Strings []string
|
||||||
|
|
||||||
|
func (c Strings) Value() (driver.Value, error) {
|
||||||
|
b, err := json.Marshal(c)
|
||||||
|
return string(b), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Strings) Scan(input interface{}) error {
|
||||||
|
return json.Unmarshal(input.([]byte), c)
|
||||||
|
}
|
||||||
|
|
||||||
|
/**************** Make gorm support the []string type ****************/
|
||||||
|
|
||||||
|
/**************** Make gorm support the []string type ****************/
|
||||||
|
type MapStrings []map[string]string
|
||||||
|
|
||||||
|
func (c MapStrings) Value() (driver.Value, error) {
|
||||||
|
b, err := json.Marshal(c)
|
||||||
|
return string(b), err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *MapStrings) Scan(input interface{}) error {
|
||||||
|
return json.Unmarshal(input.([]byte), c)
|
||||||
|
}
|
||||||
|
|
||||||
|
/**************** Make gorm support the []string type ****************/
|
||||||
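Note: model/app.go makes `[]string` persistable by implementing `driver.Valuer` / `sql.Scanner` with JSON encoding. A standalone round-trip sketch of the same pattern (no gorm required), showing what ends up in the database column; the tag values are made up.

```go
package main

import (
	"database/sql/driver"
	"encoding/json"
	"fmt"
)

// Strings mirrors the model.Strings helper: stored as a JSON string column.
type Strings []string

func (c Strings) Value() (driver.Value, error) {
	b, err := json.Marshal(c)
	return string(b), err
}

func (c *Strings) Scan(input interface{}) error {
	return json.Unmarshal(input.([]byte), c)
}

func main() {
	tags := Strings{"media", "downloader"}

	v, _ := tags.Value() // what the ORM would write: ["media","downloader"]
	fmt.Println(v)

	var out Strings
	_ = out.Scan([]byte(v.(string))) // what the ORM would read back
	fmt.Println(out)
}
```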
@@ -1,39 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ListArgs struct {
|
|
||||||
ReqPath string
|
|
||||||
}
|
|
||||||
|
|
||||||
type LinkArgs struct {
|
|
||||||
IP string
|
|
||||||
Header http.Header
|
|
||||||
Type string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Link struct {
|
|
||||||
URL string `json:"url"`
|
|
||||||
Header http.Header `json:"header"` // needed header
|
|
||||||
Data io.ReadCloser // return file reader directly
|
|
||||||
Status int // status maybe 200 or 206, etc
|
|
||||||
FilePath *string // local file, return the filepath
|
|
||||||
Expiration *time.Duration // url expiration time
|
|
||||||
Method string `json:"method"` // http method
|
|
||||||
}
|
|
||||||
|
|
||||||
type OtherArgs struct {
|
|
||||||
Obj Obj
|
|
||||||
Method string
|
|
||||||
Data interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
type FsOtherArgs struct {
|
|
||||||
Path string `json:"path" form:"path"`
|
|
||||||
Method string `json:"method" form:"method"`
|
|
||||||
Data interface{} `json:"data" form:"data"`
|
|
||||||
}
|
|
||||||
23 model/category.go Normal file
@@ -0,0 +1,23 @@
|
|||||||
|
/*
|
||||||
|
* @Author: link a624669980@163.com
|
||||||
|
* @Date: 2022-05-16 17:37:08
|
||||||
|
* @LastEditors: LinkLeong
|
||||||
|
* @LastEditTime: 2022-07-13 10:46:38
|
||||||
|
* @FilePath: /CasaOS/model/category.go
|
||||||
|
* @Description:
|
||||||
|
*/
|
||||||
|
package model
|
||||||
|
|
||||||
|
type ServerCategoryList struct {
|
||||||
|
Version string `json:"version"`
|
||||||
|
Item []CategoryList `json:"item"`
|
||||||
|
}
|
||||||
|
type CategoryList struct {
|
||||||
|
Id uint `gorm:"column:id;primary_key" json:"id"`
|
||||||
|
//CreatedAt time.Time `json:"created_at"`
|
||||||
|
//
|
||||||
|
//UpdatedAt time.Time `json:"updated_at"`
|
||||||
|
Font string `json:"font"` // @tiger - if this is frontend-related, it should not be part of the backend output; the frontend should define it
|
||||||
|
Name string `json:"name"`
|
||||||
|
Count uint `json:"count"` // @tiger - count is dynamic information and should go into a separate output structure (see the other comment about static/dynamic output for the reasoning)
|
||||||
|
}
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
type PageResp struct {
|
|
||||||
Content interface{} `json:"content"`
|
|
||||||
Total int64 `json:"total"`
|
|
||||||
}
|
|
||||||
24 model/docker.go Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
/*
|
||||||
|
* @Author: LinkLeong link@icewhale.com
|
||||||
|
* @Date: 2021-12-08 18:10:25
|
||||||
|
* @LastEditors: LinkLeong
|
||||||
|
* @LastEditTime: 2022-07-13 10:49:16
|
||||||
|
* @FilePath: /CasaOS/model/docker.go
|
||||||
|
* @Description:
|
||||||
|
* @Website: https://www.casaos.io
|
||||||
|
* Copyright (c) 2022 by icewhale, All Rights Reserved.
|
||||||
|
*/
|
||||||
|
package model
|
||||||
|
|
||||||
|
type DockerStatsModel struct {
|
||||||
|
Icon string `json:"icon"`
|
||||||
|
Title string `json:"title"`
|
||||||
|
Data interface{} `json:"data"`
|
||||||
|
Previous interface{} `json:"previous"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// reference - https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
|
||||||
|
type DockerDaemonConfigurationModel struct {
|
||||||
|
// e.g. `/var/lib/docker`
|
||||||
|
Root string `json:"data-root,omitempty"`
|
||||||
|
}
|
||||||
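Note: `DockerDaemonConfigurationModel` maps the `data-root` key of Docker's daemon configuration file. A small read-only sketch; `/etc/docker/daemon.json` is the conventional location and the fallback value is an assumption.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// dockerDaemonConfig mirrors DockerDaemonConfigurationModel above.
type dockerDaemonConfig struct {
	Root string `json:"data-root,omitempty"`
}

func main() {
	cfg := dockerDaemonConfig{}
	if b, err := os.ReadFile("/etc/docker/daemon.json"); err == nil {
		_ = json.Unmarshal(b, &cfg) // missing or malformed file simply leaves Root empty
	}
	if cfg.Root == "" {
		cfg.Root = "/var/lib/docker" // Docker's default when data-root is unset
	}
	fmt.Println("docker data-root:", cfg.Root)
}
```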
133 model/manifest.go Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
import (
|
||||||
|
"database/sql/driver"
|
||||||
|
"encoding/json"
|
||||||
|
)
|
||||||
|
|
||||||
|
type TcpPorts struct {
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
ContainerPort int `json:"container_port"`
|
||||||
|
}
|
||||||
|
type UdpPorts struct {
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
ContainerPort int `json:"container_port"`
|
||||||
|
}
|
||||||
|
|
||||||
|
/******************* Make gorm support JSON ************************************/
|
||||||
|
|
||||||
|
type PortMap struct {
|
||||||
|
ContainerPort string `json:"container"`
|
||||||
|
CommendPort string `json:"host"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
Type int `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PortArray []PortMap
|
||||||
|
|
||||||
|
// Value implements driver.Valuer
|
||||||
|
func (p PortArray) Value() (driver.Value, error) {
|
||||||
|
return json.Marshal(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan implements sql.Scanner
|
||||||
|
func (p *PortArray) Scan(input interface{}) error {
|
||||||
|
return json.Unmarshal(input.([]byte), p)
|
||||||
|
}
|
||||||
|
|
||||||
|
/************************************************************************/
|
||||||
|
|
||||||
|
/******************* Make gorm support JSON ************************************/
|
||||||
|
|
||||||
|
type Env struct {
|
||||||
|
Name string `json:"container"`
|
||||||
|
Value string `json:"host"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
Type int `json:"type"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type JSON json.RawMessage
|
||||||
|
|
||||||
|
type EnvArray []Env
|
||||||
|
|
||||||
|
// Value implements driver.Valuer
|
||||||
|
func (p EnvArray) Value() (driver.Value, error) {
|
||||||
|
return json.Marshal(p)
|
||||||
|
//return .MarshalJSON()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan implements sql.Scanner
|
||||||
|
func (p *EnvArray) Scan(input interface{}) error {
|
||||||
|
return json.Unmarshal(input.([]byte), p)
|
||||||
|
}
|
||||||
|
|
||||||
|
/************************************************************************/
|
||||||
|
|
||||||
|
/******************* Make gorm support JSON ************************************/
|
||||||
|
|
||||||
|
type PathMap struct {
|
||||||
|
ContainerPath string `json:"container"`
|
||||||
|
Path string `json:"host"`
|
||||||
|
Type int `json:"type"`
|
||||||
|
Desc string `json:"desc"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type PathArray []PathMap
|
||||||
|
|
||||||
|
// Value implements driver.Valuer
|
||||||
|
func (p PathArray) Value() (driver.Value, error) {
|
||||||
|
return json.Marshal(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scan implements sql.Scanner
|
||||||
|
func (p *PathArray) Scan(input interface{}) error {
|
||||||
|
return json.Unmarshal(input.([]byte), p)
|
||||||
|
}
|
||||||
|
|
||||||
|
/************************************************************************/
|
||||||
|
|
||||||
|
//type PostData struct {
|
||||||
|
// Envs EnvArrey `json:"envs,omitempty"`
|
||||||
|
// Udp PortArrey `json:"udp_ports"`
|
||||||
|
// Tcp PortArrey `json:"tcp_ports"`
|
||||||
|
// Volumes PathArrey `json:"volumes"`
|
||||||
|
// Devices PathArrey `json:"devices"`
|
||||||
|
// Port string `json:"port,omitempty"`
|
||||||
|
// PortMap string `json:"port_map"`
|
||||||
|
// CpuShares int64 `json:"cpu_shares,omitempty"`
|
||||||
|
// Memory int64 `json:"memory,omitempty"`
|
||||||
|
// Restart string `json:"restart,omitempty"`
|
||||||
|
// EnableUPNP bool `json:"enable_upnp"`
|
||||||
|
// Label string `json:"label"`
|
||||||
|
// Position bool `json:"position"`
|
||||||
|
//}
|
||||||
|
|
||||||
|
type CustomizationPostData struct {
|
||||||
|
ContainerName string `json:"container_name"`
|
||||||
|
CustomId string `json:"custom_id"`
|
||||||
|
Origin string `json:"origin"`
|
||||||
|
NetworkModel string `json:"network_model"`
|
||||||
|
Index string `json:"index"`
|
||||||
|
Icon string `json:"icon"`
|
||||||
|
Image string `json:"image"`
|
||||||
|
Envs EnvArray `json:"envs"`
|
||||||
|
Ports PortArray `json:"ports"`
|
||||||
|
Volumes PathArray `json:"volumes"`
|
||||||
|
Devices PathArray `json:"devices"`
|
||||||
|
//Port string `json:"port,omitempty"`
|
||||||
|
PortMap string `json:"port_map"`
|
||||||
|
CpuShares int64 `json:"cpu_shares"`
|
||||||
|
Memory int64 `json:"memory"`
|
||||||
|
Restart string `json:"restart"`
|
||||||
|
EnableUPNP bool `json:"enable_upnp"`
|
||||||
|
Label string `json:"label"`
|
||||||
|
Description string `json:"description"`
|
||||||
|
Position bool `json:"position"`
|
||||||
|
HostName string `json:"host_name"`
|
||||||
|
Privileged bool `json:"privileged"`
|
||||||
|
CapAdd []string `json:"cap_add"`
|
||||||
|
Cmd []string `json:"cmd"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
Host string `json:"host"`
|
||||||
|
}
|
||||||
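Note: `CustomizationPostData` is the request body used when creating or customizing a container; marshalling one shows the wire format implied by the json tags. The values below are made up, and this assumes the side of the compare that adds model/manifest.go.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/IceWhaleTech/CasaOS/model"
)

func main() {
	req := model.CustomizationPostData{
		ContainerName: "syncthing",
		Image:         "syncthing/syncthing:latest",
		NetworkModel:  "bridge",
		Ports: model.PortArray{
			{ContainerPort: "8384", CommendPort: "8384", Protocol: "tcp", Desc: "WebUI"},
		},
		Volumes: model.PathArray{
			{ContainerPath: "/var/syncthing", Path: "/DATA/AppData/syncthing"},
		},
		Restart: "unless-stopped",
	}

	// Ports/volumes serialize with the "container"/"host" keys defined above.
	b, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(b))
}
```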
21 model/notify/application.go Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
/*
|
||||||
|
* @Author: LinkLeong link@icewhale.com
|
||||||
|
* @Date: 2022-05-27 15:01:58
|
||||||
|
* @LastEditors: LinkLeong
|
||||||
|
* @LastEditTime: 2022-05-31 14:51:21
|
||||||
|
* @FilePath: /CasaOS/model/notify/application.go
|
||||||
|
* @Description:
|
||||||
|
* @Website: https://www.casaos.io
|
||||||
|
* Copyright (c) 2022 by icewhale, All Rights Reserved.
|
||||||
|
*/
|
||||||
|
package notify
|
||||||
|
|
||||||
|
type Application struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
State string `json:"state"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Icon string `json:"icon"`
|
||||||
|
Message string `json:"message"`
|
||||||
|
Finished bool `json:"finished"`
|
||||||
|
Success bool `json:"success"`
|
||||||
|
}
|
||||||
20 model/notify/message.go Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
/*
|
||||||
|
* @Author: LinkLeong link@icewhale.com
|
||||||
|
* @Date: 2022-05-26 14:39:22
|
||||||
|
* @LastEditors: LinkLeong
|
||||||
|
* @LastEditTime: 2022-05-26 19:08:52
|
||||||
|
* @FilePath: /CasaOS/model/notify/message.go
|
||||||
|
* @Description:
|
||||||
|
* @Website: https://www.casaos.io
|
||||||
|
* Copyright (c) 2022 by icewhale, All Rights Reserved.
|
||||||
|
*/
|
||||||
|
package notify
|
||||||
|
|
||||||
|
import (
|
||||||
|
f "github.com/ambelovsky/gosf"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Message struct {
|
||||||
|
Path string `json:"path"`
|
||||||
|
Msg f.Message `json:"msg"`
|
||||||
|
}
|
||||||
186 model/obj.go
@@ -1,186 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
mapset "github.com/deckarep/golang-set/v2"
|
|
||||||
|
|
||||||
"github.com/maruel/natural"
|
|
||||||
)
|
|
||||||
|
|
||||||
type UnwrapObj interface {
|
|
||||||
Unwrap() Obj
|
|
||||||
}
|
|
||||||
|
|
||||||
type Obj interface {
|
|
||||||
GetSize() int64
|
|
||||||
GetName() string
|
|
||||||
ModTime() time.Time
|
|
||||||
IsDir() bool
|
|
||||||
|
|
||||||
// The internal information of the driver.
|
|
||||||
// If you want to use it, please understand what it means
|
|
||||||
GetID() string
|
|
||||||
GetPath() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type FileStreamer interface {
|
|
||||||
io.ReadCloser
|
|
||||||
Obj
|
|
||||||
GetMimetype() string
|
|
||||||
SetReadCloser(io.ReadCloser)
|
|
||||||
NeedStore() bool
|
|
||||||
GetReadCloser() io.ReadCloser
|
|
||||||
GetOld() Obj
|
|
||||||
}
|
|
||||||
|
|
||||||
type URL interface {
|
|
||||||
URL() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Thumb interface {
|
|
||||||
Thumb() string
|
|
||||||
}
|
|
||||||
|
|
||||||
type SetPath interface {
|
|
||||||
SetPath(path string)
|
|
||||||
}
|
|
||||||
|
|
||||||
func SortFiles(objs []Obj, orderBy, orderDirection string) {
|
|
||||||
if orderBy == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
sort.Slice(objs, func(i, j int) bool {
|
|
||||||
switch orderBy {
|
|
||||||
case "name":
|
|
||||||
{
|
|
||||||
c := natural.Less(objs[i].GetName(), objs[j].GetName())
|
|
||||||
if orderDirection == "desc" {
|
|
||||||
return !c
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
case "size":
|
|
||||||
{
|
|
||||||
if orderDirection == "desc" {
|
|
||||||
return objs[i].GetSize() >= objs[j].GetSize()
|
|
||||||
}
|
|
||||||
return objs[i].GetSize() <= objs[j].GetSize()
|
|
||||||
}
|
|
||||||
case "modified":
|
|
||||||
if orderDirection == "desc" {
|
|
||||||
return objs[i].ModTime().After(objs[j].ModTime())
|
|
||||||
}
|
|
||||||
return objs[i].ModTime().Before(objs[j].ModTime())
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExtractFolder(objs []Obj, extractFolder string) {
|
|
||||||
if extractFolder == "" {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
front := extractFolder == "front"
|
|
||||||
sort.SliceStable(objs, func(i, j int) bool {
|
|
||||||
if objs[i].IsDir() || objs[j].IsDir() {
|
|
||||||
if !objs[i].IsDir() {
|
|
||||||
return !front
|
|
||||||
}
|
|
||||||
if !objs[j].IsDir() {
|
|
||||||
return front
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wrap
|
|
||||||
func WrapObjName(objs Obj) Obj {
|
|
||||||
return &ObjWrapName{Obj: objs}
|
|
||||||
}
|
|
||||||
|
|
||||||
func WrapObjsName(objs []Obj) {
|
|
||||||
for i := 0; i < len(objs); i++ {
|
|
||||||
objs[i] = &ObjWrapName{Obj: objs[i]}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func UnwrapObjs(obj Obj) Obj {
|
|
||||||
if unwrap, ok := obj.(UnwrapObj); ok {
|
|
||||||
obj = unwrap.Unwrap()
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetThumb(obj Obj) (thumb string, ok bool) {
|
|
||||||
if obj, ok := obj.(Thumb); ok {
|
|
||||||
return obj.Thumb(), true
|
|
||||||
}
|
|
||||||
if unwrap, ok := obj.(UnwrapObj); ok {
|
|
||||||
return GetThumb(unwrap.Unwrap())
|
|
||||||
}
|
|
||||||
return thumb, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetUrl(obj Obj) (url string, ok bool) {
|
|
||||||
if obj, ok := obj.(URL); ok {
|
|
||||||
return obj.URL(), true
|
|
||||||
}
|
|
||||||
if unwrap, ok := obj.(UnwrapObj); ok {
|
|
||||||
return GetUrl(unwrap.Unwrap())
|
|
||||||
}
|
|
||||||
return url, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Merge
|
|
||||||
func NewObjMerge() *ObjMerge {
|
|
||||||
return &ObjMerge{
|
|
||||||
set: mapset.NewSet[string](),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type ObjMerge struct {
|
|
||||||
regs []*regexp.Regexp
|
|
||||||
set mapset.Set[string]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (om *ObjMerge) Merge(objs []Obj, objs_ ...Obj) []Obj {
|
|
||||||
newObjs := make([]Obj, 0, len(objs)+len(objs_))
|
|
||||||
newObjs = om.insertObjs(om.insertObjs(newObjs, objs...), objs_...)
|
|
||||||
return newObjs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (om *ObjMerge) insertObjs(objs []Obj, objs_ ...Obj) []Obj {
|
|
||||||
for _, obj := range objs_ {
|
|
||||||
if om.clickObj(obj) {
|
|
||||||
objs = append(objs, obj)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return objs
|
|
||||||
}
|
|
||||||
|
|
||||||
func (om *ObjMerge) clickObj(obj Obj) bool {
|
|
||||||
for _, reg := range om.regs {
|
|
||||||
if reg.MatchString(obj.GetName()) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return om.set.Add(obj.GetName())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (om *ObjMerge) InitHideReg(hides string) {
|
|
||||||
rs := strings.Split(hides, "\n")
|
|
||||||
om.regs = make([]*regexp.Regexp, 0, len(rs))
|
|
||||||
for _, r := range rs {
|
|
||||||
om.regs = append(om.regs, regexp.MustCompile(r))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (om *ObjMerge) Reset() {
|
|
||||||
om.set.Clear()
|
|
||||||
}
|
|
||||||
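Note: the removed obj.go sorts listings with natural name ordering and can pull folders to the front or back of the slice. A usage sketch with the concrete `model.Object` type from the next removed file; the entries are made up, and this assumes the v0.4.2 side of the compare where these model helpers still exist.

```go
package main

import (
	"fmt"
	"time"

	"github.com/IceWhaleTech/CasaOS/model"
)

func main() {
	objs := []model.Obj{
		&model.Object{Name: "b.txt", Size: 10, Modified: time.Now()},
		&model.Object{Name: "a10.txt", Size: 20, Modified: time.Now()},
		&model.Object{Name: "Photos", IsFolder: true},
	}

	model.SortFiles(objs, "name", "asc") // natural ordering by name, ascending
	model.ExtractFolder(objs, "front")   // then move folders to the front

	for _, o := range objs {
		fmt.Println(o.GetName(), o.IsDir())
	}
}
```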
@@ -1,90 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
type ObjWrapName struct {
|
|
||||||
Name string
|
|
||||||
Obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *ObjWrapName) Unwrap() Obj {
|
|
||||||
return o.Obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *ObjWrapName) GetName() string {
|
|
||||||
if o.Name == "" {
|
|
||||||
o.Name = o.Obj.GetName()
|
|
||||||
}
|
|
||||||
return o.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
type Object struct {
|
|
||||||
ID string
|
|
||||||
Path string
|
|
||||||
Name string
|
|
||||||
Size int64
|
|
||||||
Modified time.Time
|
|
||||||
IsFolder bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) GetName() string {
|
|
||||||
return o.Name
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) GetSize() int64 {
|
|
||||||
return o.Size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) ModTime() time.Time {
|
|
||||||
return o.Modified
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) IsDir() bool {
|
|
||||||
return o.IsFolder
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) GetID() string {
|
|
||||||
return o.ID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) GetPath() string {
|
|
||||||
return o.Path
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Object) SetPath(id string) {
|
|
||||||
o.Path = id
|
|
||||||
}
|
|
||||||
|
|
||||||
type Thumbnail struct {
|
|
||||||
Thumbnail string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Url struct {
|
|
||||||
Url string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w Url) URL() string {
|
|
||||||
return w.Url
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t Thumbnail) Thumb() string {
|
|
||||||
return t.Thumbnail
|
|
||||||
}
|
|
||||||
|
|
||||||
type ObjThumb struct {
|
|
||||||
Object
|
|
||||||
Thumbnail
|
|
||||||
}
|
|
||||||
|
|
||||||
type ObjectURL struct {
|
|
||||||
Object
|
|
||||||
Url
|
|
||||||
}
|
|
||||||
|
|
||||||
type ObjThumbURL struct {
|
|
||||||
Object
|
|
||||||
Thumbnail
|
|
||||||
Url
|
|
||||||
}
|
|
||||||
23 model/req.go
@@ -1,23 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
type PageReq struct {
|
|
||||||
Index int `json:"page" form:"index"`
|
|
||||||
Size int `json:"size" form:"size"`
|
|
||||||
}
|
|
||||||
|
|
||||||
const MaxUint = ^uint(0)
|
|
||||||
const MinUint = 0
|
|
||||||
const MaxInt = int(MaxUint >> 1)
|
|
||||||
const MinInt = -MaxInt - 1
|
|
||||||
|
|
||||||
func (p *PageReq) Validate() {
|
|
||||||
if p.Index < 1 {
|
|
||||||
p.Index = 1
|
|
||||||
}
|
|
||||||
if p.Size < 1 {
|
|
||||||
p.Size = 100000
|
|
||||||
}
|
|
||||||
// if p.PerPage < 1 {
|
|
||||||
// p.PerPage = MaxInt
|
|
||||||
// }
|
|
||||||
}
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
const (
|
|
||||||
SINGLE = iota
|
|
||||||
SITE
|
|
||||||
STYLE
|
|
||||||
PREVIEW
|
|
||||||
GLOBAL
|
|
||||||
ARIA2
|
|
||||||
INDEX
|
|
||||||
GITHUB
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
PUBLIC = iota
|
|
||||||
PRIVATE
|
|
||||||
READONLY
|
|
||||||
DEPRECATED
|
|
||||||
)
|
|
||||||
|
|
||||||
type SettingItem struct {
|
|
||||||
Key string `json:"key" gorm:"primaryKey" binding:"required"` // unique key
|
|
||||||
Value string `json:"value"` // value
|
|
||||||
Help string `json:"help"` // help message
|
|
||||||
Type string `json:"type"` // string, number, bool, select
|
|
||||||
Options string `json:"options"` // values for select
|
|
||||||
Group int `json:"group"` // use to group setting in frontend
|
|
||||||
Flag int `json:"flag"` // 0 = public, 1 = private, 2 = readonly, 3 = deprecated, etc.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SettingItem) IsDeprecated() bool {
|
|
||||||
return s.Flag == DEPRECATED
|
|
||||||
}
|
|
||||||
69 model/smartctl_model.go Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
package model
|
||||||
|
|
||||||
|
//
|
||||||
|
type SmartctlA struct {
|
||||||
|
Smartctl struct {
|
||||||
|
Version []int `json:"version"`
|
||||||
|
SvnRevision string `json:"svn_revision"`
|
||||||
|
PlatformInfo string `json:"platform_info"`
|
||||||
|
BuildInfo string `json:"build_info"`
|
||||||
|
Argv []string `json:"argv"`
|
||||||
|
ExitStatus int `json:"exit_status"`
|
||||||
|
} `json:"smartctl"`
|
||||||
|
Device struct {
|
||||||
|
Name string `json:"name"`
|
||||||
|
InfoName string `json:"info_name"`
|
||||||
|
Type string `json:"type"`
|
||||||
|
Protocol string `json:"protocol"`
|
||||||
|
} `json:"device"`
|
||||||
|
ModelName string `json:"model_name"`
|
||||||
|
SerialNumber string `json:"serial_number"`
|
||||||
|
FirmwareVersion string `json:"firmware_version"`
|
||||||
|
UserCapacity struct {
|
||||||
|
Blocks int `json:"blocks"`
|
||||||
|
Bytes int64 `json:"bytes"`
|
||||||
|
} `json:"user_capacity"`
|
||||||
|
SmartStatus struct {
|
||||||
|
Passed bool `json:"passed"`
|
||||||
|
} `json:"smart_status"`
|
||||||
|
AtaSmartData struct {
|
||||||
|
OfflineDataCollection struct {
|
||||||
|
Status struct {
|
||||||
|
Value int `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
} `json:"status"`
|
||||||
|
CompletionSeconds int `json:"completion_seconds"`
|
||||||
|
} `json:"offline_data_collection"`
|
||||||
|
SelfTest struct {
|
||||||
|
Status struct {
|
||||||
|
Value int `json:"value"`
|
||||||
|
String string `json:"string"`
|
||||||
|
Passed bool `json:"passed"`
|
||||||
|
} `json:"status"`
|
||||||
|
PollingMinutes struct {
|
||||||
|
Short int `json:"short"`
|
||||||
|
Extended int `json:"extended"`
|
||||||
|
Conveyance int `json:"conveyance"`
|
||||||
|
} `json:"polling_minutes"`
|
||||||
|
} `json:"self_test"`
|
||||||
|
Capabilities struct {
|
||||||
|
Values []int `json:"values"`
|
||||||
|
ExecOfflineImmediateSupported bool `json:"exec_offline_immediate_supported"`
|
||||||
|
OfflineIsAbortedUponNewCmd bool `json:"offline_is_aborted_upon_new_cmd"`
|
||||||
|
OfflineSurfaceScanSupported bool `json:"offline_surface_scan_supported"`
|
||||||
|
SelfTestsSupported bool `json:"self_tests_supported"`
|
||||||
|
ConveyanceSelfTestSupported bool `json:"conveyance_self_test_supported"`
|
||||||
|
SelectiveSelfTestSupported bool `json:"selective_self_test_supported"`
|
||||||
|
AttributeAutosaveEnabled bool `json:"attribute_autosave_enabled"`
|
||||||
|
ErrorLoggingSupported bool `json:"error_logging_supported"`
|
||||||
|
GpLoggingSupported bool `json:"gp_logging_supported"`
|
||||||
|
} `json:"capabilities"`
|
||||||
|
} `json:"ata_smart_data"`
|
||||||
|
PowerOnTime struct {
|
||||||
|
Hours int `json:"hours"`
|
||||||
|
} `json:"power_on_time"`
|
||||||
|
PowerCycleCount int `json:"power_cycle_count"`
|
||||||
|
Temperature struct {
|
||||||
|
Current int `json:"current"`
|
||||||
|
} `json:"temperature"`
|
||||||
|
}
|
||||||
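Note: `SmartctlA` mirrors the JSON that `smartctl` emits with its JSON output flag. A sketch that shells out and decodes the health status; `/dev/sda` is just an example device, and this assumes smartmontools 7.x (which added `--json`) is installed.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"

	"github.com/IceWhaleTech/CasaOS/model"
)

func main() {
	// --json is available in smartmontools 7.x; the device path is an example.
	out, err := exec.Command("smartctl", "-a", "--json", "/dev/sda").Output()
	if err != nil {
		fmt.Println("smartctl failed:", err)
		return
	}

	var info model.SmartctlA
	if err := json.Unmarshal(out, &info); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Printf("%s (%s): passed=%v, %d°C\n",
		info.ModelName, info.SerialNumber, info.SmartStatus.Passed, info.Temperature.Current)
}
```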
@@ -1,54 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
type Storage struct {
|
|
||||||
ID uint `json:"id" gorm:"primaryKey"` // unique key
|
|
||||||
MountPath string `json:"mount_path" gorm:"unique" binding:"required"` // must be standardized
|
|
||||||
Order int `json:"order"` // use to sort
|
|
||||||
Driver string `json:"driver"` // driver used
|
|
||||||
CacheExpiration int `json:"cache_expiration"` // cache expire time
|
|
||||||
Status string `json:"status"`
|
|
||||||
Addition string `json:"addition" gorm:"type:text"` // Additional information, defined in the corresponding driver
|
|
||||||
Remark string `json:"remark"`
|
|
||||||
Modified time.Time `json:"modified"`
|
|
||||||
Disabled bool `json:"disabled"` // if disabled
|
|
||||||
Sort
|
|
||||||
Proxy
|
|
||||||
}
|
|
||||||
|
|
||||||
type Sort struct {
|
|
||||||
OrderBy string `json:"order_by"`
|
|
||||||
OrderDirection string `json:"order_direction"`
|
|
||||||
ExtractFolder string `json:"extract_folder"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Proxy struct {
|
|
||||||
WebProxy bool `json:"web_proxy"`
|
|
||||||
WebdavPolicy string `json:"webdav_policy"`
|
|
||||||
DownProxyUrl string `json:"down_proxy_url"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) GetStorage() *Storage {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) SetStorage(storage Storage) {
|
|
||||||
*s = storage
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Storage) SetStatus(status string) {
|
|
||||||
s.Status = status
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Proxy) Webdav302() bool {
|
|
||||||
return p.WebdavPolicy == "302_redirect"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Proxy) WebdavProxy() bool {
|
|
||||||
return p.WebdavPolicy == "use_proxy_url"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p Proxy) WebdavNative() bool {
|
|
||||||
return !p.Webdav302() && !p.WebdavProxy()
|
|
||||||
}
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
package model
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
type FileStream struct {
|
|
||||||
Obj
|
|
||||||
io.ReadCloser
|
|
||||||
Mimetype string
|
|
||||||
WebPutAsTask bool
|
|
||||||
Old Obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileStream) GetMimetype() string {
|
|
||||||
return f.Mimetype
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileStream) NeedStore() bool {
|
|
||||||
return f.WebPutAsTask
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileStream) GetReadCloser() io.ReadCloser {
|
|
||||||
return f.ReadCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileStream) SetReadCloser(rc io.ReadCloser) {
|
|
||||||
f.ReadCloser = rc
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *FileStream) GetOld() Obj {
|
|
||||||
return f.Old
|
|
||||||
}
|
|
||||||
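Note: `FileStream` bundles object metadata with its reader for uploads (the `Put` path earlier in this diff). A sketch constructing one from a local file; the path and mimetype are made up, and the fs-level entry point that would consume the stream is not shown here.

```go
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/IceWhaleTech/CasaOS/model"
)

func main() {
	f, err := os.Open("/DATA/demo.txt") // example file
	if err != nil {
		fmt.Println(err)
		return
	}

	st, _ := f.Stat()
	stream := &model.FileStream{
		Obj: &model.Object{
			Name:     st.Name(),
			Size:     st.Size(),
			Modified: time.Now(),
		},
		ReadCloser:   f, // closed by the Put path once the upload finishes
		Mimetype:     "text/plain",
		WebPutAsTask: false,
	}
	fmt.Println(stream.GetName(), stream.GetSize(), stream.GetMimetype())
}
```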
@@ -14,7 +14,7 @@ import "time"
|
|||||||
|
|
||||||
// System configuration
|
// System configuration
|
||||||
type SysInfoModel struct {
|
type SysInfoModel struct {
|
||||||
Name string // system name
|
Name string //system name
|
||||||
}
|
}
|
||||||
|
|
||||||
// Service configuration
|
// Service configuration
|
||||||
@@ -25,7 +25,7 @@ type ServerModel struct {
|
|||||||
LockAccount bool
|
LockAccount bool
|
||||||
Token string
|
Token string
|
||||||
USBAutoMount string
|
USBAutoMount string
|
||||||
UpdateUrl string
|
SocketPort string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Service configuration
|
// Service configuration
|
||||||
@@ -65,12 +65,11 @@ type SystemConfig struct {
|
|||||||
ConfigPath string `json:"config_path"`
|
ConfigPath string `json:"config_path"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type CasaOSGlobalVariables struct {
|
||||||
|
AppChange bool
|
||||||
|
}
|
||||||
|
|
||||||
type FileSetting struct {
|
type FileSetting struct {
|
||||||
ShareDir []string `json:"share_dir" delim:"|"`
|
ShareDir []string `json:"share_dir" delim:"|"`
|
||||||
DownloadDir string `json:"download_dir"`
|
DownloadDir string `json:"download_dir"`
|
||||||
}
|
}
|
||||||
type BaseInfo struct {
|
|
||||||
Hash string `json:"i"`
|
|
||||||
Version string `json:"v"`
|
|
||||||
Channel string `json:"c,omitempty"`
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -31,20 +31,23 @@ var AppInfo = &model.APPModel{}
|
|||||||
|
|
||||||
var CommonInfo = &model.CommonModel{}
|
var CommonInfo = &model.CommonModel{}
|
||||||
|
|
||||||
// var RedisInfo = &model.RedisModel{}
|
//var RedisInfo = &model.RedisModel{}
|
||||||
|
|
||||||
// server-related
|
// server-related
|
||||||
var ServerInfo = &model.ServerModel{}
|
var ServerInfo = &model.ServerModel{}
|
||||||
|
|
||||||
var SystemConfigInfo = &model.SystemConfig{}
|
var SystemConfigInfo = &model.SystemConfig{}
|
||||||
|
|
||||||
|
var CasaOSGlobalVariables = &model.CasaOSGlobalVariables{}
|
||||||
|
|
||||||
var FileSettingInfo = &model.FileSetting{}
|
var FileSettingInfo = &model.FileSetting{}
|
||||||
|
|
||||||
var Cfg *ini.File
|
var Cfg *ini.File
|
||||||
|
|
||||||
// Initialize settings and fetch part of the system information.
|
// Initialize settings and fetch part of the system information.
|
||||||
func InitSetup(config string) {
|
func InitSetup(config string) {
|
||||||
configDir := USERCONFIGURL
|
|
||||||
|
var configDir = USERCONFIGURL
|
||||||
if len(config) > 0 {
|
if len(config) > 0 {
|
||||||
configDir = config
|
configDir = config
|
||||||
}
|
}
|
||||||
@@ -52,7 +55,7 @@ func InitSetup(config string) {
|
|||||||
configDir = "./conf/conf.conf"
|
configDir = "./conf/conf.conf"
|
||||||
}
|
}
|
||||||
var err error
|
var err error
|
||||||
// read the config file
|
//read the config file
|
||||||
Cfg, err = ini.Load(configDir)
|
Cfg, err = ini.Load(configDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Cfg, err = ini.Load("/etc/casaos.conf")
|
Cfg, err = ini.Load("/etc/casaos.conf")
|
||||||
@@ -65,7 +68,7 @@ func InitSetup(config string) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
mapTo("app", AppInfo)
|
mapTo("app", AppInfo)
|
||||||
// mapTo("redis", RedisInfo)
|
//mapTo("redis", RedisInfo)
|
||||||
mapTo("server", ServerInfo)
|
mapTo("server", ServerInfo)
|
||||||
mapTo("system", SystemConfigInfo)
|
mapTo("system", SystemConfigInfo)
|
||||||
mapTo("file", FileSettingInfo)
|
mapTo("file", FileSettingInfo)
|
||||||
@@ -88,6 +91,7 @@ func InitSetup(config string) {
|
|||||||
}
|
}
|
||||||
Cfg.SaveTo(configDir)
|
Cfg.SaveTo(configDir)
|
||||||
// AppInfo.ProjectPath = getCurrentDirectory() //os.Getwd()
|
// AppInfo.ProjectPath = getCurrentDirectory() //os.Getwd()
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// mapping
|
// mapping
|
||||||
@@ -107,7 +111,6 @@ func getCurrentAbPathByCaller() string {
|
|||||||
}
|
}
|
||||||
return abPath
|
return abPath
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCurrentDirectory() string {
|
func getCurrentDirectory() string {
|
||||||
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
|
dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
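Note: `InitSetup` loads `conf.conf` (falling back to `/etc/casaos.conf`) with go-ini and maps sections onto the config structs via the private `mapTo` helper. A minimal stand-alone sketch of the same section-mapping call; the path, section name, and field names below are examples, not the real CasaOS config schema.

```go
package main

import (
	"fmt"

	"gopkg.in/ini.v1"
)

// serverConfig is a hypothetical target struct for the [server] section.
type serverConfig struct {
	HttpPort     string
	USBAutoMount string
}

func main() {
	cfg, err := ini.Load("/etc/casaos/casaos.conf") // example path
	if err != nil {
		fmt.Println("load config failed:", err)
		return
	}

	server := &serverConfig{}
	// Equivalent of the private mapTo("server", ServerInfo) helper above.
	if err := cfg.Section("server").MapTo(server); err != nil {
		fmt.Println("map section failed:", err)
		return
	}
	fmt.Printf("%+v\n", server)
}
```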
3 pkg/docker/emum.go Normal file
@@ -0,0 +1,3 @@
package docker

const NETWORKNAME = "oasis"
402 pkg/docker/helper.go Normal file
@@ -0,0 +1,402 @@
|
|||||||
|
package docker
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
json2 "encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"regexp"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gorilla/websocket"
|
||||||
|
"github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewSshClient(user, password string, port string) (*ssh.Client, error) {
|
||||||
|
// connect to ssh
|
||||||
|
// addr = fmt.Sprintf("%s:%d", host, port)
|
||||||
|
|
||||||
|
config := &ssh.ClientConfig{
|
||||||
|
Timeout: time.Second * 5,
|
||||||
|
User: user,
|
||||||
|
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
|
||||||
|
// HostKeyCallback: ,
|
||||||
|
// HostKeyCallback: hostKeyCallBackFunc(h.Host),
|
||||||
|
}
|
||||||
|
// if h.Type == "password" {
|
||||||
|
config.Auth = []ssh.AuthMethod{ssh.Password(password)}
|
||||||
|
//} else {
|
||||||
|
// config.Auth = []ssh.AuthMethod{publicKeyAuthFunc(h.Key)}
|
||||||
|
//}
|
||||||
|
addr := fmt.Sprintf("%s:%s", "127.0.0.1", port)
|
||||||
|
c, err := ssh.Dial("tcp", addr, config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup ssh shell session
|
||||||
|
// set Session and StdinPipe here,
|
||||||
|
// and the Session.Stdout and Session.Sdterr are also set.
|
||||||
|
func NewSshConn(cols, rows int, sshClient *ssh.Client) (*SshConn, error) {
|
||||||
|
sshSession, err := sshClient.NewSession()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
stdinP, err := sshSession.StdinPipe()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
comboWriter := new(wsBufferWriter)
|
||||||
|
|
||||||
|
sshSession.Stdout = comboWriter
|
||||||
|
sshSession.Stderr = comboWriter
|
||||||
|
|
||||||
|
modes := ssh.TerminalModes{
|
||||||
|
ssh.ECHO: 1, // disable echo
|
||||||
|
ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
|
||||||
|
ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
|
||||||
|
}
|
||||||
|
// Request pseudo terminal
|
||||||
|
if err := sshSession.RequestPty("xterm", rows, cols, modes); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
// Start remote shell
|
||||||
|
if err := sshSession.Shell(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &SshConn{StdinPipe: stdinP, ComboOutput: comboWriter, Session: sshSession}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type SshConn struct {
|
||||||
|
// calling Write() to write data into ssh server
|
||||||
|
StdinPipe io.WriteCloser
|
||||||
|
// Write() be called to receive data from ssh server
|
||||||
|
ComboOutput *wsBufferWriter
|
||||||
|
Session *ssh.Session
|
||||||
|
}
|
||||||
|
type wsBufferWriter struct {
|
||||||
|
buffer bytes.Buffer
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *wsBufferWriter) Write(p []byte) (int, error) {
|
||||||
|
w.mu.Lock()
|
||||||
|
defer w.mu.Unlock()
|
||||||
|
return w.buffer.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *SshConn) Close() {
|
||||||
|
if s.Session != nil {
|
||||||
|
s.Session.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
wsMsgCmd = "cmd"
|
||||||
|
wsMsgResize = "resize"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ReceiveWsMsg receive websocket msg do some handling then write into ssh.session.stdin
|
||||||
|
func ReceiveWsMsgUser(wsConn *websocket.Conn, logBuff *bytes.Buffer) string {
|
||||||
|
// tells other go routine quit
|
||||||
|
username := ""
|
||||||
|
for {
|
||||||
|
|
||||||
|
// read websocket msg
|
||||||
|
_, wsData, err := wsConn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
msgObj := wsMsg{}
|
||||||
|
if err := json2.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
msgObj.Type = "cmd"
|
||||||
|
msgObj.Cmd = string(wsData)
|
||||||
|
}
|
||||||
|
//if err := json.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
// logrus.WithError(err).WithField("wsData", string(wsData)).Error("unmarshal websocket message failed")
|
||||||
|
//}
|
||||||
|
switch msgObj.Type {
|
||||||
|
case wsMsgCmd:
|
||||||
|
// handle xterm.js stdin
|
||||||
|
// decodeBytes, err := base64.StdEncoding.DecodeString(msgObj.Cmd)
|
||||||
|
decodeBytes := []byte(msgObj.Cmd)
|
||||||
|
if msgObj.Cmd == "\u007f" {
|
||||||
|
if len(username) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
wsConn.WriteMessage(websocket.TextMessage, []byte("\b\x1b[K"))
|
||||||
|
username = username[:len(username)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if msgObj.Cmd == "\r" {
|
||||||
|
return username
|
||||||
|
}
|
||||||
|
username += msgObj.Cmd
|
||||||
|
|
||||||
|
if err := wsConn.WriteMessage(websocket.TextMessage, decodeBytes); err != nil {
|
||||||
|
logrus.WithError(err).Error("ws cmd bytes write to ssh.stdin pipe failed")
|
||||||
|
}
|
||||||
|
// write input cmd to log buffer
|
||||||
|
if _, err := logBuff.Write(decodeBytes); err != nil {
|
||||||
|
logrus.WithError(err).Error("write received cmd into log buffer failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReceiveWsMsgPassword(wsConn *websocket.Conn, logBuff *bytes.Buffer) string {
|
||||||
|
// tells other go routine quit
|
||||||
|
password := ""
|
||||||
|
for {
|
||||||
|
|
||||||
|
// read websocket msg
|
||||||
|
_, wsData, err := wsConn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Error("reading webSocket message failed")
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
msgObj := wsMsg{}
|
||||||
|
if err := json2.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
msgObj.Type = "cmd"
|
||||||
|
msgObj.Cmd = string(wsData)
|
||||||
|
}
|
||||||
|
//if err := json.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
// logrus.WithError(err).WithField("wsData", string(wsData)).Error("unmarshal websocket message failed")
|
||||||
|
//}
|
||||||
|
switch msgObj.Type {
|
||||||
|
case wsMsgCmd:
|
||||||
|
// handle xterm.js stdin
|
||||||
|
// decodeBytes, err := base64.StdEncoding.DecodeString(msgObj.Cmd)
|
||||||
|
if msgObj.Cmd == "\r" {
|
||||||
|
return password
|
||||||
|
}
|
||||||
|
|
||||||
|
if msgObj.Cmd == "\u007f" {
|
||||||
|
if len(password) == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
password = password[:len(password)-1]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
password += msgObj.Cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReceiveWsMsg receive websocket msg do some handling then write into ssh.session.stdin
|
||||||
|
func (ssConn *SshConn) ReceiveWsMsg(wsConn *websocket.Conn, logBuff *bytes.Buffer, exitCh chan bool) {
|
||||||
|
// tells other go routine quit
|
||||||
|
defer setQuit(exitCh)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-exitCh:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// read websocket msg
|
||||||
|
_, wsData, err := wsConn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Error("reading webSocket message failed")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
//unmashal bytes into struct
|
||||||
|
//msgObj := wsMsg{
|
||||||
|
// Type: "cmd",
|
||||||
|
// Cmd: "",
|
||||||
|
// Rows: 50,
|
||||||
|
// Cols: 180,
|
||||||
|
//}
|
||||||
|
msgObj := wsMsg{}
|
||||||
|
if err := json2.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
msgObj.Type = "cmd"
|
||||||
|
msgObj.Cmd = string(wsData)
|
||||||
|
}
|
||||||
|
//if err := json.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
// logrus.WithError(err).WithField("wsData", string(wsData)).Error("unmarshal websocket message failed")
|
||||||
|
//}
|
||||||
|
switch msgObj.Type {
|
||||||
|
|
||||||
|
case wsMsgResize:
|
||||||
|
// handle xterm.js size change
|
||||||
|
if msgObj.Cols > 0 && msgObj.Rows > 0 {
|
||||||
|
if err := ssConn.Session.WindowChange(msgObj.Rows, msgObj.Cols); err != nil {
|
||||||
|
logrus.WithError(err).Error("ssh pty change windows size failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case wsMsgCmd:
|
||||||
|
// handle xterm.js stdin
|
||||||
|
// decodeBytes, err := base64.StdEncoding.DecodeString(msgObj.Cmd)
|
||||||
|
decodeBytes := []byte(msgObj.Cmd)
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Error("websock cmd string base64 decoding failed")
|
||||||
|
}
|
||||||
|
if _, err := ssConn.StdinPipe.Write(decodeBytes); err != nil {
|
||||||
|
logrus.WithError(err).Error("ws cmd bytes write to ssh.stdin pipe failed")
|
||||||
|
}
|
||||||
|
// write input cmd to log buffer
|
||||||
|
if _, err := logBuff.Write(decodeBytes); err != nil {
|
||||||
|
logrus.WithError(err).Error("write received cmd into log buffer failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ssConn *SshConn) SendComboOutput(wsConn *websocket.Conn, exitCh chan bool) {
|
||||||
|
// tells other go routine quit
|
||||||
|
// defer setQuit(exitCh)
|
||||||
|
|
||||||
|
// every 120ms write combine output bytes into websocket response
|
||||||
|
tick := time.NewTicker(time.Millisecond * time.Duration(120))
|
||||||
|
// for range time.Tick(120 * time.Millisecond){}
|
||||||
|
defer tick.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-tick.C:
|
||||||
|
// write combine output bytes into websocket response
|
||||||
|
if err := flushComboOutput(ssConn.ComboOutput, wsConn); err != nil {
|
||||||
|
logrus.WithError(err).Error("ssh sending combo output to webSocket failed")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
case <-exitCh:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func flushComboOutput(w *wsBufferWriter, wsConn *websocket.Conn) error {
|
||||||
|
if w.buffer.Len() != 0 {
|
||||||
|
err := wsConn.WriteMessage(websocket.TextMessage, w.buffer.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
w.buffer.Reset()
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReceiveWsMsg receive websocket msg do some handling then write into ssh.session.stdin
|
||||||
|
func (ssConn *SshConn) Login(wsConn *websocket.Conn, logBuff *bytes.Buffer, exitCh chan bool) {
|
||||||
|
// tells other go routine quit
|
||||||
|
defer setQuit(exitCh)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-exitCh:
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
// read websocket msg
|
||||||
|
_, wsData, err := wsConn.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Error("reading webSocket message failed")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
//unmashal bytes into struct
|
||||||
|
//msgObj := wsMsg{
|
||||||
|
// Type: "cmd",
|
||||||
|
// Cmd: "",
|
||||||
|
// Rows: 50,
|
||||||
|
// Cols: 180,
|
||||||
|
//}
|
||||||
|
msgObj := wsMsg{}
|
||||||
|
if err := json2.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
msgObj.Type = "cmd"
|
||||||
|
msgObj.Cmd = string(wsData)
|
||||||
|
}
|
||||||
|
//if err := json.Unmarshal(wsData, &msgObj); err != nil {
|
||||||
|
// logrus.WithError(err).WithField("wsData", string(wsData)).Error("unmarshal websocket message failed")
|
||||||
|
//}
|
||||||
|
switch msgObj.Type {
|
||||||
|
|
||||||
|
case wsMsgResize:
|
||||||
|
// handle xterm.js size change
|
||||||
|
if msgObj.Cols > 0 && msgObj.Rows > 0 {
|
||||||
|
if err := ssConn.Session.WindowChange(msgObj.Rows, msgObj.Cols); err != nil {
|
||||||
|
logrus.WithError(err).Error("ssh pty change windows size failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case wsMsgCmd:
|
||||||
|
// handle xterm.js stdin
|
||||||
|
// decodeBytes, err := base64.StdEncoding.DecodeString(msgObj.Cmd)
|
||||||
|
decodeBytes := []byte(msgObj.Cmd)
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Error("websock cmd string base64 decoding failed")
|
||||||
|
}
|
||||||
|
if _, err := ssConn.StdinPipe.Write(decodeBytes); err != nil {
|
||||||
|
logrus.WithError(err).Error("ws cmd bytes write to ssh.stdin pipe failed")
|
||||||
|
}
|
||||||
|
// write input cmd to log buffer
|
||||||
|
if _, err := logBuff.Write(decodeBytes); err != nil {
|
||||||
|
logrus.WithError(err).Error("write received cmd into log buffer failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ssConn *SshConn) SessionWait(quitChan chan bool) {
|
||||||
|
if err := ssConn.Session.Wait(); err != nil {
|
||||||
|
logrus.WithError(err).Error("ssh session wait failed")
|
||||||
|
setQuit(quitChan)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setQuit(ch chan bool) {
|
||||||
|
ch <- true
|
||||||
|
}
|
||||||
|
|
||||||
|
type wsMsg struct {
|
||||||
|
Type string `json:"type"`
|
||||||
|
Cmd string `json:"cmd"`
|
||||||
|
Cols int `json:"cols"`
|
||||||
|
Rows int `json:"rows"`
|
||||||
|
}
|
||||||
|
|
||||||
|
// 将终端的输出转发到前端
|
||||||
|
func WsWriterCopy(reader io.Reader, writer *websocket.Conn) {
|
||||||
|
buf := make([]byte, 8192)
|
||||||
|
reg1 := regexp.MustCompile(`stty rows \d+ && stty cols \d+ `)
|
||||||
|
for {
|
||||||
|
nr, err := reader.Read(buf)
|
||||||
|
if nr > 0 {
|
||||||
|
result1 := reg1.FindIndex(buf[0:nr])
|
||||||
|
if len(result1) > 0 {
|
||||||
|
fmt.Println(result1)
|
||||||
|
} else {
|
||||||
|
err := writer.WriteMessage(websocket.BinaryMessage, buf[0:nr])
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 将前端的输入转发到终端
|
||||||
|
func WsReaderCopy(reader *websocket.Conn, writer io.Writer) {
|
||||||
|
for {
|
||||||
|
messageType, p, err := reader.ReadMessage()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if messageType == websocket.TextMessage {
|
||||||
|
msgObj := wsMsg{}
|
||||||
|
if err = json2.Unmarshal(p, &msgObj); err != nil {
|
||||||
|
writer.Write(p)
|
||||||
|
} else if msgObj.Type == wsMsgResize {
|
||||||
|
// writer.Write([]byte("stty rows " + strconv.Itoa(msgObj.Rows) + " && stty cols " + strconv.Itoa(msgObj.Cols) + " \r"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
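The file above supplies every piece of a browser terminal: NewSshClient dials sshd on 127.0.0.1, NewSshConn attaches a pty-backed shell session, and the SshConn methods pump bytes between that session and an xterm.js websocket. Below is a minimal sketch of how these helpers might be wired into an HTTP handler; the import path, route, port, credential flow and terminal size are assumptions for illustration, not part of this diff.

```go
package main

import (
	"bytes"
	"log"
	"net/http"

	"github.com/IceWhaleTech/CasaOS/pkg/docker" // assumed import path, based on the file location above
	"github.com/gorilla/websocket"
)

var upgrader = websocket.Upgrader{
	// allow any origin in this sketch; tighten this in real code
	CheckOrigin: func(r *http.Request) bool { return true },
}

func terminalHandler(w http.ResponseWriter, r *http.Request) {
	wsConn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer wsConn.Close()

	// ask the browser for credentials over the same websocket
	logBuff := new(bytes.Buffer)
	user := docker.ReceiveWsMsgUser(wsConn, logBuff)
	password := docker.ReceiveWsMsgPassword(wsConn, logBuff)

	client, err := docker.NewSshClient(user, password, "22")
	if err != nil {
		log.Println("ssh dial failed:", err)
		return
	}
	defer client.Close()

	sshConn, err := docker.NewSshConn(120, 32, client) // 120 cols x 32 rows, arbitrary defaults
	if err != nil {
		log.Println("ssh session failed:", err)
		return
	}
	defer sshConn.Close()

	// buffered so setQuit never blocks when several goroutines signal shutdown
	quit := make(chan bool, 3)
	go sshConn.SendComboOutput(wsConn, quit)    // ssh stdout/stderr -> websocket
	go sshConn.SessionWait(quit)                // signal when the remote shell exits
	sshConn.ReceiveWsMsg(wsConn, logBuff, quit) // websocket -> ssh stdin (blocks)
}

func main() {
	http.HandleFunc("/v1/ssh", terminalHandler) // hypothetical route
	log.Fatal(http.ListenAndServe(":8089", nil))
}
```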
11 pkg/docker/volumes.go Normal file
@@ -0,0 +1,11 @@
package docker

import "strings"

func GetDir(id, envName string) string {

	if strings.Contains(envName, "$AppID") && len(id) > 0 {
		return strings.ReplaceAll(envName, "$AppID", id)
	}
	return envName
}
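GetDir substitutes the $AppID placeholder only when a non-empty container id is passed; otherwise the value comes back untouched. A small hedged example (the import path and the paths themselves are illustrative assumptions):

```go
package main

import (
	"fmt"

	"github.com/IceWhaleTech/CasaOS/pkg/docker" // assumed import path
)

func main() {
	// $AppID is replaced only when a non-empty id is supplied
	fmt.Println(docker.GetDir("a1b2c3", "/DATA/AppData/$AppID/config")) // /DATA/AppData/a1b2c3/config
	fmt.Println(docker.GetDir("", "/DATA/AppData/$AppID/config"))       // unchanged: /DATA/AppData/$AppID/config
	fmt.Println(docker.GetDir("a1b2c3", "/etc/config"))                 // unchanged: /etc/config
}
```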
10 pkg/docker/volumes_test.go Normal file
@@ -0,0 +1,10 @@
package docker

import (
	"fmt"
	"testing"
)

func TestGetDir(t *testing.T) {
	fmt.Println(GetDir("", "config"))
}
12 pkg/fs/fs.go
@@ -1,12 +0,0 @@
package fs

import "io"

// CheckClose is a utility function used to check the return from
// Close in a defer statement.
func CheckClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}
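CheckClose is meant to be paired with a named error return, so a failure from Close is not silently dropped when the function body itself succeeded. A self-contained sketch follows; readAll is a hypothetical caller, and CheckClose is reproduced inline so the snippet compiles on its own.

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readAll is a hypothetical caller: the named return err lets the deferred
// CheckClose surface a Close error without masking an earlier read error.
func readAll(path string) (data []byte, err error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer CheckClose(f, &err)
	return io.ReadAll(f)
}

// CheckClose stores the Close error into *err only if no earlier error is set.
func CheckClose(c io.Closer, err *error) {
	cerr := c.Close()
	if *err == nil {
		*err = cerr
	}
}

func main() {
	if _, err := readAll("/etc/hostname"); err != nil {
		fmt.Println(err)
	}
}
```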
@@ -1,412 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package generic_sync

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// MapOf is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The MapOf type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The MapOf type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a MapOf may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero MapOf is empty and ready for use. A MapOf must not be copied after first use.
type MapOf[K comparable, V any] struct {
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[K]*entry[V]

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the MapOf.read field.
type readOnly[K comparable, V any] struct {
	m       map[K]*entry[V]
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry[V any] struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

func newEntry[V any](i V) *entry[V] {
	return &entry[V]{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
	read, _ := m.read.Load().(readOnly[K, V])
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly[K, V])
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return value, false
	}
	return e.load()
}

func (m *MapOf[K, V]) Has(key K) bool {
	_, ok := m.Load(key)
	return ok
}

func (e *entry[V]) load() (value V, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return value, false
	}
	return *(*V)(p), true
}

// Store sets the value for a key.
func (m *MapOf[K, V]) Store(key K, value V) {
	read, _ := m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry[V]) tryStore(i *V) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry[V]) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry[V]) storeLocked(i *V) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly[K, V])
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly[K, V]{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry[V]) tryLoadOrStore(i V) (actual V, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return actual, false, false
	}
	if p != nil {
		return *(*V)(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return actual, false, false
		}
		if p != nil {
			return *(*V)(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *MapOf[K, V]) Delete(key K) {
	read, _ := m.read.Load().(readOnly[K, V])
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly[K, V])
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

func (e *entry[V]) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the MapOf's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly[K, V])
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly[K, V])
		if read.amended {
			read = readOnly[K, V]{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// Values returns a slice of the values in the map.
func (m *MapOf[K, V]) Values() []V {
	var values []V
	m.Range(func(key K, value V) bool {
		values = append(values, value)
		return true
	})
	return values
}

func (m *MapOf[K, V]) Count() int {
	return len(m.dirty)
}

func (m *MapOf[K, V]) Empty() bool {
	return m.Count() == 0
}

func (m *MapOf[K, V]) ToMap() map[K]V {
	ans := make(map[K]V)
	m.Range(func(key K, value V) bool {
		ans[key] = value
		return true
	})
	return ans
}

func (m *MapOf[K, V]) Clear() {
	m.Range(func(key K, value V) bool {
		m.Delete(key)
		return true
	})
}

func (m *MapOf[K, V]) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly[K, V]{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

func (m *MapOf[K, V]) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly[K, V])
	m.dirty = make(map[K]*entry[V], len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry[V]) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
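The comments above spell out when MapOf pays off: write-once/read-many caches, or goroutines that each touch disjoint key sets. A short usage sketch, assuming the package lives at github.com/IceWhaleTech/CasaOS/pkg/generic_sync (the file path is not shown in this diff):

```go
package main

import (
	"fmt"

	"github.com/IceWhaleTech/CasaOS/pkg/generic_sync" // assumed import path
)

func main() {
	var online generic_sync.MapOf[string, int] // the zero value is ready to use

	online.Store("casaos", 1)
	online.Store("syncthing", 2)
	online.LoadOrStore("casaos", 99) // key already exists, so 1 is kept

	if v, ok := online.Load("casaos"); ok {
		fmt.Println("casaos ->", v) // casaos -> 1
	}

	online.Range(func(key string, value int) bool {
		fmt.Println(key, value)
		return true // keep iterating
	})

	online.Delete("syncthing")
	fmt.Println(online.Has("syncthing")) // false
}
```

One quirk worth noting from the code above: Count returns len(m.dirty), which can read 0 after the dirty map has been promoted even though entries still exist, so Range or ToMap are the reliable ways to take stock of the contents.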
51 pkg/quic_helper/config.go Normal file
@@ -0,0 +1,51 @@
package quic_helper

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/tls"
	"crypto/x509"
	"encoding/pem"
	"math/big"

	"github.com/lucas-clemente/quic-go"
)

// Setup a bare-bones TLS config for the server
func GetGenerateTLSConfig(token string) *tls.Config {
	key, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		panic(err)
	}
	template := x509.Certificate{SerialNumber: big.NewInt(1)}
	certDER, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER})

	tlsCert, err := tls.X509KeyPair(certPEM, keyPEM)
	if err != nil {
		panic(err)
	}
	return &tls.Config{
		Certificates:           []tls.Certificate{tlsCert},
		NextProtos:             []string{token},
		SessionTicketsDisabled: true,
	}
}
func GetClientTlsConfig(otherToken string) *tls.Config {
	return &tls.Config{
		InsecureSkipVerify:     true,
		NextProtos:             []string{otherToken},
		SessionTicketsDisabled: true,
	}
}

func GetQUICConfig() *quic.Config {
	return &quic.Config{
		ConnectionIDLength: 4,
		KeepAlive:          true,
	}
}
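GetGenerateTLSConfig builds a throwaway self-signed certificate and uses the token as the ALPN protocol name, which both peers must present for the handshake to succeed; GetClientTlsConfig skips certificate verification on the client side accordingly. A hedged server-side sketch follows. quic-go's API has changed considerably across releases, so ListenAddr/DialAddr here reflect the older lucas-clemente/quic-go versions that still expose ConnectionIDLength and KeepAlive; the address and token are made up for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/IceWhaleTech/CasaOS/pkg/quic_helper" // assumed import path
	"github.com/lucas-clemente/quic-go"
)

func main() {
	token := "casaos" // the ALPN protocol name both sides must agree on

	// server side: self-signed certificate generated on the fly
	listener, err := quic.ListenAddr("0.0.0.0:9000",
		quic_helper.GetGenerateTLSConfig(token),
		quic_helper.GetQUICConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	fmt.Println("QUIC listener on", listener.Addr())

	// a client elsewhere would dial with the matching token:
	//   sess, err := quic.DialAddr("server:9000",
	//       quic_helper.GetClientTlsConfig(token),
	//       quic_helper.GetQUICConfig())
}
```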
@@ -1,52 +0,0 @@
package sign

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"io"
	"strconv"
	"strings"
	"time"
)

type HMACSign struct {
	SecretKey []byte
}

func (s HMACSign) Sign(data string, expire int64) string {
	h := hmac.New(sha256.New, s.SecretKey)
	expireTimeStamp := strconv.FormatInt(expire, 10)
	_, err := io.WriteString(h, data+":"+expireTimeStamp)
	if err != nil {
		return ""
	}

	return base64.URLEncoding.EncodeToString(h.Sum(nil)) + ":" + expireTimeStamp
}

func (s HMACSign) Verify(data, sign string) error {
	signSlice := strings.Split(sign, ":")
	// check whether contains expire time
	if signSlice[len(signSlice)-1] == "" {
		return ErrExpireMissing
	}
	// check whether expire time is expired
	expires, err := strconv.ParseInt(signSlice[len(signSlice)-1], 10, 64)
	if err != nil {
		return ErrExpireInvalid
	}
	// if expire time is expired, return error
	if expires < time.Now().Unix() && expires != 0 {
		return ErrSignExpired
	}
	// verify sign
	if s.Sign(data, expires) != sign {
		return ErrSignInvalid
	}
	return nil
}

func NewHMACSign(secret []byte) Sign {
	return HMACSign{SecretKey: secret}
}
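Sign produces base64(HMAC-SHA256(data:expire)) followed by ":expire", and Verify re-derives the signature to reject expired or tampered tokens. A usage sketch, assuming an import path of github.com/IceWhaleTech/CasaOS/pkg/sign (the deleted file's path is not shown here) and a made-up payload:

```go
package main

import (
	"fmt"
	"time"

	"github.com/IceWhaleTech/CasaOS/pkg/sign" // assumed import path
)

func main() {
	s := sign.NewHMACSign([]byte("a-shared-secret"))

	// sign a hypothetical download link so it expires in 10 minutes
	data := "/v1/file?path=/DATA/demo.txt"
	expire := time.Now().Add(10 * time.Minute).Unix()
	token := s.Sign(data, expire)

	// the receiving side verifies the same data against the token
	if err := s.Verify(data, token); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("accepted:", token)
}
```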
@@ -1,15 +0,0 @@
package sign

import "errors"

type Sign interface {
	Sign(data string, expire int64) string
	Verify(data, sign string) error
}

var (
	ErrSignExpired   = errors.New("sign expired")
	ErrSignInvalid   = errors.New("sign invalid")
	ErrExpireInvalid = errors.New("expire invalid")
	ErrExpireMissing = errors.New("expire missing")
)
@@ -1,212 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package singleflight provides a duplicate function call suppression
// mechanism.
package singleflight

import (
	"bytes"
	"errors"
	"fmt"
	"runtime"
	"runtime/debug"
	"sync"
)

// errGoexit indicates the runtime.Goexit was called in
// the user given function.
var errGoexit = errors.New("runtime.Goexit was called")

// A panicError is an arbitrary value recovered from a panic
// with the stack trace during the execution of given function.
type panicError struct {
	value any
	stack []byte
}

// Error implements error interface.
func (p *panicError) Error() string {
	return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
}

func newPanicError(v any) error {
	stack := debug.Stack()

	// The first line of the stack trace is of the form "goroutine N [status]:"
	// but by the time the panic reaches Do the goroutine may no longer exist
	// and its status will have changed. Trim out the misleading line.
	if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
		stack = stack[line+1:]
	}
	return &panicError{value: v, stack: stack}
}

// call is an in-flight or completed singleflight.Do call
type call[T any] struct {
	wg sync.WaitGroup

	// These fields are written once before the WaitGroup is done
	// and are only read after the WaitGroup is done.
	val T
	err error

	// forgotten indicates whether Forget was called with this call's key
	// while the call was still in flight.
	forgotten bool

	// These fields are read and written with the singleflight
	// mutex held before the WaitGroup is done, and are read but
	// not written after the WaitGroup is done.
	dups  int
	chans []chan<- Result[T]
}

// Group represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type Group[T any] struct {
	mu sync.Mutex          // protects m
	m  map[string]*call[T] // lazily initialized
}

// Result holds the results of Do, so they can be passed
// on a channel.
type Result[T any] struct {
	Val    T
	Err    error
	Shared bool
}

// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *Group[T]) Do(key string, fn func() (T, error)) (v T, err error, shared bool) {
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[string]*call[T])
	}
	if c, ok := g.m[key]; ok {
		c.dups++
		g.mu.Unlock()
		c.wg.Wait()

		if e, ok := c.err.(*panicError); ok {
			panic(e)
		} else if c.err == errGoexit {
			runtime.Goexit()
		}
		return c.val, c.err, true
	}
	c := new(call[T])
	c.wg.Add(1)
	g.m[key] = c
	g.mu.Unlock()

	g.doCall(c, key, fn)
	return c.val, c.err, c.dups > 0
}

// DoChan is like Do but returns a channel that will receive the
// results when they are ready.
//
// The returned channel will not be closed.
func (g *Group[T]) DoChan(key string, fn func() (T, error)) <-chan Result[T] {
	ch := make(chan Result[T], 1)
	g.mu.Lock()
	if g.m == nil {
		g.m = make(map[string]*call[T])
	}
	if c, ok := g.m[key]; ok {
		c.dups++
		c.chans = append(c.chans, ch)
		g.mu.Unlock()
		return ch
	}
	c := &call[T]{chans: []chan<- Result[T]{ch}}
	c.wg.Add(1)
	g.m[key] = c
	g.mu.Unlock()

	go g.doCall(c, key, fn)

	return ch
}

// doCall handles the single call for a key.
func (g *Group[T]) doCall(c *call[T], key string, fn func() (T, error)) {
	normalReturn := false
	recovered := false

	// use double-defer to distinguish panic from runtime.Goexit,
	// more details see https://golang.org/cl/134395
	defer func() {
		// the given function invoked runtime.Goexit
		if !normalReturn && !recovered {
			c.err = errGoexit
		}

		c.wg.Done()
		g.mu.Lock()
		defer g.mu.Unlock()
		if !c.forgotten {
			delete(g.m, key)
		}

		if e, ok := c.err.(*panicError); ok {
			// In order to prevent the waiting channels from being blocked forever,
			// needs to ensure that this panic cannot be recovered.
			if len(c.chans) > 0 {
				go panic(e)
				select {} // Keep this goroutine around so that it will appear in the crash dump.
			} else {
				panic(e)
			}
		} else if c.err == errGoexit {
			// Already in the process of goexit, no need to call again
		} else {
			// Normal return
			for _, ch := range c.chans {
				ch <- Result[T]{c.val, c.err, c.dups > 0}
			}
		}
	}()

	func() {
		defer func() {
			if !normalReturn {
				// Ideally, we would wait to take a stack trace until we've determined
				// whether this is a panic or a runtime.Goexit.
				//
				// Unfortunately, the only way we can distinguish the two is to see
				// whether the recover stopped the goroutine from terminating, and by
				// the time we know that, the part of the stack trace relevant to the
				// panic has been discarded.
				if r := recover(); r != nil {
					c.err = newPanicError(r)
				}
			}
		}()

		c.val, c.err = fn()
		normalReturn = true
	}()

	if !normalReturn {
		recovered = true
	}
}

// Forget tells the singleflight to forget about a key. Future calls
// to Do for this key will call the function rather than waiting for
// an earlier call to complete.
func (g *Group[T]) Forget(key string) {
	g.mu.Lock()
	if c, ok := g.m[key]; ok {
		c.forgotten = true
	}
	delete(g.m, key)
	g.mu.Unlock()
}
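Group.Do collapses concurrent calls that share a key into one execution of fn and hands every waiter the same result, which is what the package comment means by duplicate suppression. A sketch of the pattern, with an assumed import path and a stand-in for an expensive fetch:

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/IceWhaleTech/CasaOS/pkg/singleflight" // assumed import path
)

var group singleflight.Group[string]

// fetchAppList stands in for an expensive call, e.g. hitting an app store API.
func fetchAppList() (string, error) {
	time.Sleep(100 * time.Millisecond)
	return fmt.Sprintf("fetched at %s", time.Now().Format(time.RFC3339Nano)), nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			// concurrent callers with the same key share a single in-flight fetch
			v, err, shared := group.Do("app-list", fetchAppList)
			fmt.Println(v, err, shared)
		}()
	}
	wg.Wait()
}
```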
@@ -13,12 +13,11 @@ package sqlite
import (
	"time"

-	"github.com/IceWhaleTech/CasaOS-Common/utils/logger"
-	"github.com/IceWhaleTech/CasaOS/model"
	"github.com/IceWhaleTech/CasaOS/pkg/utils/file"
+	"github.com/IceWhaleTech/CasaOS/pkg/utils/loger"
	model2 "github.com/IceWhaleTech/CasaOS/service/model"
-	"github.com/glebarez/sqlite"
	"go.uber.org/zap"
+	"gorm.io/driver/sqlite"
	"gorm.io/gorm"
)

@@ -29,27 +28,28 @@ func GetDb(dbPath string) *gorm.DB {
		return gdb
	}
	// Refer https://github.com/go-sql-driver/mysql#dsn-data-source-name
-	// dsn := fmt.Sprintf("%v:%v@tcp(%v:%v)/%v?charset=utf8mb4&parseTime=True&loc=Local", m.User, m.PWD, m.IP, m.Port, m.DBName)
+	//dsn := fmt.Sprintf("%v:%v@tcp(%v:%v)/%v?charset=utf8mb4&parseTime=True&loc=Local", m.User, m.PWD, m.IP, m.Port, m.DBName)
-	// db, err := gorm.Open(mysql2.Open(dsn), &gorm.Config{})
+	//db, err := gorm.Open(mysql2.Open(dsn), &gorm.Config{})
	file.IsNotExistMkDir(dbPath)
	db, err := gorm.Open(sqlite.Open(dbPath+"/casaOS.db"), &gorm.Config{})
	c, _ := db.DB()
	c.SetMaxIdleConns(10)
-	c.SetMaxOpenConns(1)
+	c.SetMaxOpenConns(100)
	c.SetConnMaxIdleTime(time.Second * 1000)
	if err != nil {
-		logger.Error("sqlite connect error", zap.Any("db connect error", err))
+		loger.Error("sqlite connect error", zap.Any("db connect error", err))
		panic("sqlite connect error")
+		return nil
	}
	gdb = db

-	err = db.AutoMigrate(&model2.AppNotify{}, model2.SharesDBModel{}, model2.ConnectionsDBModel{}, model.Storage{})
+	err = db.AutoMigrate(&model2.AppNotify{}, &model2.AppListDBModel{}, model2.SharesDBModel{}, model2.ConnectionsDBModel{})
	db.Exec("DROP TABLE IF EXISTS o_application")
	db.Exec("DROP TABLE IF EXISTS o_friend")
	db.Exec("DROP TABLE IF EXISTS o_person_download")
	db.Exec("DROP TABLE IF EXISTS o_person_down_record")
	if err != nil {
-		logger.Error("check or create db error", zap.Any("error", err))
+		loger.Error("check or create db error", zap.Any("error", err))
	}
	return db
}
@@ -1,18 +0,0 @@
package utils

import "strings"

var balance = ".balance"

func IsBalance(str string) bool {
	return strings.Contains(str, balance)
}

// GetActualMountPath remove balance suffix
func GetActualMountPath(virtualPath string) string {
	bIndex := strings.LastIndex(virtualPath, ".balance")
	if bIndex != -1 {
		virtualPath = virtualPath[:bIndex]
	}
	return virtualPath
}
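These helpers implement the ".balance" naming convention for merged mount points: IsBalance detects the suffix and GetActualMountPath strips it to recover the underlying path. A tiny hedged example (import path and paths assumed):

```go
package main

import (
	"fmt"

	"github.com/IceWhaleTech/CasaOS/pkg/utils" // assumed import path
)

func main() {
	// paths are hypothetical examples of the ".balance" naming convention
	fmt.Println(utils.IsBalance("/mnt/pool.balance"))          // true
	fmt.Println(utils.GetActualMountPath("/mnt/pool.balance")) // /mnt/pool
	fmt.Println(utils.GetActualMountPath("/mnt/disk1"))        // /mnt/disk1 (unchanged)
}
```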
@@ -1,5 +0,0 @@
package utils

func IsBool(bs ...bool) bool {
	return len(bs) > 0 && bs[0]
}
@@ -2,12 +2,11 @@ package command

import (
	"bufio"
+	"context"
	"fmt"
	"io/ioutil"
-	"os"
	"os/exec"
-	"path/filepath"
-	"strings"
+	"time"
)

func OnlyExec(cmdStr string) {
@@ -96,52 +95,19 @@ func ExecLSBLKByPath(path string) []byte {
	return output
}

-func ExecuteScripts(scriptDirectory string) {
-	if _, err := os.Stat(scriptDirectory); os.IsNotExist(err) {
-		fmt.Printf("No post-start scripts at %s\n", scriptDirectory)
-		return
-	}
-
-	files, err := os.ReadDir(scriptDirectory)
-	if err != nil {
-		fmt.Printf("Failed to read from script directory %s: %s\n", scriptDirectory, err.Error())
-		return
-	}
-
-	for _, file := range files {
-		if file.IsDir() {
-			continue
-		}
-
-		scriptFilepath := filepath.Join(scriptDirectory, file.Name())
-
-		f, err := os.Open(scriptFilepath)
-		if err != nil {
-			fmt.Printf("Failed to open script file %s: %s\n", scriptFilepath, err.Error())
-			continue
-		}
-		f.Close()
-
-		scanner := bufio.NewScanner(f)
-		scanner.Scan()
-		shebang := scanner.Text()
-
-		interpreter := "/bin/sh"
-		if strings.HasPrefix(shebang, "#!") {
-			interpreter = shebang[2:]
-		}
-
-		cmd := exec.Command(interpreter, scriptFilepath)
-
-		fmt.Printf("Executing post-start script %s using %s\n", scriptFilepath, interpreter)
-
-		cmd.Stdout = os.Stdout
-		cmd.Stderr = os.Stderr
-
-		err = cmd.Run()
-		if err != nil {
-			fmt.Printf("Failed to execute post-start script %s: %s\n", scriptFilepath, err.Error())
-		}
-	}
-	fmt.Println("Finished executing post-start scripts.")
-}
+// exec smart
+func ExecSmartCTLByPath(path string) []byte {
+	timeout := 3
+	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
+	defer cancel()
+	output, err := exec.CommandContext(ctx, "smartctl", "-a", path, "-j").Output()
+	if err != nil {
+		fmt.Println("smartctl", err)
+		return nil
+	}
+	return output
+}
+
+func ExecEnabledSMART(path string) {
+	exec.Command("smartctl", "-s on", path).Output()
+}
|||||||
@@ -1,29 +0,0 @@
|
|||||||
package command
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"gotest.tools/assert"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestExecuteScripts(t *testing.T) {
|
|
||||||
// make a temp directory
|
|
||||||
tmpDir, err := os.MkdirTemp("", "casaos-test-*")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer os.RemoveAll(tmpDir)
|
|
||||||
|
|
||||||
ExecuteScripts(tmpDir)
|
|
||||||
|
|
||||||
// create a sample script under tmpDir
|
|
||||||
script := tmpDir + "/test.sh"
|
|
||||||
f, err := os.Create(script)
|
|
||||||
assert.NilError(t, err)
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
// write a sample script
|
|
||||||
_, err = f.WriteString("#!/bin/bash\necho 123")
|
|
||||||
assert.NilError(t, err)
|
|
||||||
|
|
||||||
ExecuteScripts(tmpDir)
|
|
||||||
}
|
|
||||||
@@ -30,7 +30,6 @@ const (
	Record_NOT_EXIST = 20007
	Record_ALREADY_EXIST = 20008
	SERVICE_NOT_RUNNING = 20009
-	CHARACTER_LIMIT = 20010

	//disk
	NAME_NOT_AVAILABLE = 40001
@@ -46,12 +45,11 @@ const (
	ERROR_APP_NAME_EXIST = 50004

	//file
	FILE_DOES_NOT_EXIST = 60001
	FILE_READ_ERROR = 60002
	FILE_DELETE_ERROR = 60003
	DIR_NOT_EXISTS = 60004
	SOURCE_DES_SAME = 60005
-	MOUNTED_DIRECTIORIES = 60006

	//share
	SHARE_ALREADY_EXISTS = 70001
@@ -87,7 +85,6 @@ var MsgFlags = map[int]string{
	Record_ALREADY_EXIST: "Record already exists",
	Record_NOT_EXIST: "Record does not exist",
	SERVICE_NOT_RUNNING: "Service is not running",
-	CHARACTER_LIMIT: "Only uppercase letters, lowercase letters and numbers are allowed for username and password.",

	//app
	UNINSTALL_APP_ERROR: "Error uninstalling app",
@@ -110,14 +107,13 @@ var MsgFlags = map[int]string{

	DIR_NOT_EXISTS: "Directory does not exist",

	FILE_READ_ERROR: "File read error",
	FILE_DELETE_ERROR: "Delete error",
-	MOUNTED_DIRECTIORIES: "The directory is mounted, please unmount it first.",

	COMMAND_ERROR_INVALID_OPERATION: "invalid operation",
}

-// get the error message
+//get the error message
func GetMsg(code int) string {
	msg, ok := MsgFlags[code]
	if ok {
Some files were not shown because too many files have changed in this diff.