mirror of
https://github.com/cristicalin/synology-csi.git
synced 2026-03-27 11:23:05 +00:00
Compare commits
2 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
33076f1d58 | ||
|
|
01541defd8 |
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,3 +1 @@
|
||||
bin/
|
||||
config/client-info.yml
|
||||
test/sanity/sanity-test-secret-file.yaml
|
||||
|
||||
@@ -12,20 +12,16 @@ RUN go mod download
|
||||
|
||||
COPY Makefile .
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
|
||||
COPY main.go .
|
||||
COPY pkg ./pkg
|
||||
RUN env GOARCH=$(echo "$TARGETPLATFORM" | cut -f2 -d/) \
|
||||
GOARM=$(echo "$TARGETPLATFORM" | cut -f3 -d/ | cut -c2-) \
|
||||
make
|
||||
RUN make
|
||||
|
||||
############## Final stage ##############
|
||||
FROM alpine:latest
|
||||
LABEL maintainers="Synology Authors" \
|
||||
description="Synology CSI Plugin"
|
||||
|
||||
RUN apk add --no-cache e2fsprogs e2fsprogs-extra xfsprogs xfsprogs-extra blkid util-linux iproute2 bash btrfs-progs ca-certificates cifs-utils
|
||||
RUN apk add --no-cache e2fsprogs e2fsprogs-extra xfsprogs xfsprogs-extra blkid util-linux iproute2 bash
|
||||
|
||||
# Create symbolic link for chroot.sh
|
||||
WORKDIR /
|
||||
|
||||
13
Makefile
13
Makefile
@@ -2,15 +2,12 @@
|
||||
|
||||
REGISTRY_NAME=synology
|
||||
IMAGE_NAME=synology-csi
|
||||
IMAGE_VERSION=v1.1.0
|
||||
IMAGE_VERSION=v1.0.0
|
||||
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(IMAGE_VERSION)
|
||||
|
||||
# For now, only build linux/amd64 platform
|
||||
ifeq ($(GOARCH),)
|
||||
GOARCH:=amd64
|
||||
endif
|
||||
GOARM?=""
|
||||
BUILD_ENV=CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) GOARM=$(GOARM)
|
||||
GOARCH?=amd64
|
||||
BUILD_ENV=CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH)
|
||||
BUILD_FLAGS="-extldflags \"-static\""
|
||||
|
||||
.PHONY: all clean synology-csi-driver synocli test docker-build
|
||||
@@ -24,15 +21,11 @@ synology-csi-driver:
|
||||
docker-build:
|
||||
docker build -f Dockerfile -t $(IMAGE_TAG) .
|
||||
|
||||
docker-build-multiarch:
|
||||
docker buildx build -t $(IMAGE_TAG) --platform linux/amd64,linux/arm/v7,linux/arm64 . --push
|
||||
|
||||
synocli:
|
||||
@mkdir -p bin
|
||||
$(BUILD_ENV) go build -v -ldflags $(BUILD_FLAGS) -o ./bin/synocli ./synocli
|
||||
|
||||
test:
|
||||
go clean -testcache
|
||||
go test -v ./test/...
|
||||
clean:
|
||||
-rm -rf ./bin
|
||||
|
||||
69
README.md
69
README.md
@@ -4,11 +4,9 @@ The official [Container Storage Interface](https://github.com/container-storage-
|
||||
|
||||
### Container Images & Kubernetes Compatibility
|
||||
Driver Name: csi.san.synology.com
|
||||
| Driver Version | Image | Supported K8s Version |
|
||||
| -------------------------------------------------------------------------------- | --------------------------------------------------------------------- | --------------------- |
|
||||
| [v1.1.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.1.0) | [synology-csi:v1.1.0](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
|
||||
| [v1.0.1](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.1) | [synology-csi:v1.0.1](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
|
||||
| [v1.0.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.0) | [synology-csi:v1.0.0](https://hub.docker.com/r/synology/synology-csi) | 1.19 |
|
||||
| Driver Version | Image | Supported K8s Version |
|
||||
| -------------- | --------------------------------------------------------------------- | --------------------- |
|
||||
| v1.0.0 | [synology-csi:v1.0.0](https://hub.docker.com/r/synology/synology-csi) | 1.19 |
|
||||
|
||||
|
||||
|
||||
@@ -20,7 +18,7 @@ The Synology CSI driver supports:
|
||||
|
||||
## Installation
|
||||
### Prerequisites
|
||||
- Kubernetes versions 1.19 or above
|
||||
- Kubernetes versions 1.19
|
||||
- Synology NAS running DSM 7.0 or above
|
||||
- Go version 1.16 or above is recommended
|
||||
- (Optional) Both [Volume Snapshot CRDs](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0/client/config/crd) and the [common snapshot controller](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0/deploy/kubernetes/snapshot-controller) must be installed in your Kubernetes cluster if you want to use the **Snapshot** feature
|
||||
@@ -100,7 +98,6 @@ Create and apply StorageClasses with the properties you want.
|
||||
|
||||
1. Create YAML files using the one at `deploy/kubernetes/<k8s version>/storage-class.yml` as the example, whose content is as below:
|
||||
|
||||
**iSCSI Protocol**
|
||||
```
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
@@ -116,56 +113,18 @@ Create and apply StorageClasses with the properties you want.
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
```
|
||||
|
||||
**SMB/CIFS Protocol**
|
||||
|
||||
Before creating an SMB/CIFS storage class, you must **create a secret** and specify the DSM user whom you want to give permissions to.
|
||||
|
||||
```
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: cifs-csi-credentials
|
||||
namespace: default
|
||||
type: Opaque
|
||||
stringData:
|
||||
username: <username> # DSM user account accessing the shared folder
|
||||
password: <password> # DSM user password accessing the shared folder
|
||||
```
|
||||
|
||||
After creating the secret, create a storage class and fill the secret for node-stage-secret. This is a **required** step if you're using SMB, or there will be errors when staging volumes.
|
||||
|
||||
```
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synostorage-smb
|
||||
provisioner: csi.san.synology.com
|
||||
parameters:
|
||||
protocol: "smb"
|
||||
dsm: '192.168.1.1'
|
||||
location: '/volume1'
|
||||
csi.storage.k8s.io/node-stage-secret-name: "cifs-csi-credentials"
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: "default"
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
```
|
||||
|
||||
2. Configure the StorageClass properties by assigning the parameters in the table. You can also leave blank if you don’t have a preference:
|
||||
|
||||
| Name | Type | Description | Default | Supported protocols |
|
||||
| ------------------------------------------------ | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------------------- |
|
||||
| *dsm* | string | The IPv4 address of your DSM, which must be included in the `client-info.yml` for the CSI driver to log in to DSM | - | iSCSI, SMB |
|
||||
| *location* | string | The location (/volume1, /volume2, ...) on DSM where the LUN for *PersistentVolume* will be created | - | iSCSI, SMB |
|
||||
| *fsType* | string | The formatting file system of the *PersistentVolumes* when you mount them on the pods. This parameter only works with iSCSI. For SMB, the fsType is always ‘cifs‘. | 'ext4' | iSCSI |
|
||||
| *protocol* | string | The backing storage protocol. Enter ‘iscsi’ to create LUNs or ‘smb‘ to create shared folders on DSM. | 'iscsi' | iSCSI, SMB |
|
||||
| *csi.storage.k8s.io/node-stage-secret-name* | string | The name of node-stage-secret. Required if DSM shared folder is accessed via SMB. | - | SMB |
|
||||
| *csi.storage.k8s.io/node-stage-secret-namespace* | string | The namespace of node-stage-secret. Required if DSM shared folder is accessed via SMB. | - | SMB |
|
||||
| Name | Type | Description | Default |
|
||||
| ---------- | ------ | ----------------------------------------------------------------------------------------------------------------- | ------- |
|
||||
| *dsm* | string | The IPv4 address of your DSM, which must be included in the `client-info.yml` for the CSI driver to log in to DSM | - |
|
||||
| *location* | string | The location (/volume1, /volume2, ...) on DSM where the LUN for *PersistentVolume* will be created | - |
|
||||
| *fsType* | string | The formatting file system of the *PersistentVolumes* when you mount them on the pods | 'ext4' |
|
||||
|
||||
**Notice**
|
||||
|
||||
- If you leave the parameter *location* blank, the CSI driver will choose a volume on DSM with available storage to create the volumes.
|
||||
- All iSCSI volumes created by the CSI driver are Thin Provisioned LUNs on DSM. This will allow you to take snapshots of them.
|
||||
- All volumes created by the CSI driver are Thin Provisioned LUNs on DSM. This will allow you to take snapshots of them.
|
||||
|
||||
3. Apply the YAML files to the Kubernetes cluster.
|
||||
|
||||
@@ -194,10 +153,10 @@ Create and apply VolumeSnapshotClasses with the properties you want.
|
||||
|
||||
2. Configure volume snapshot class properties by assigning the following parameters, all parameters are optional:
|
||||
|
||||
| Name | Type | Description | Default | Supported protocols |
|
||||
| ------------- | ------ | -------------------------------------------- | ------- | ------------------- |
|
||||
| *description* | string | The description of the snapshot on DSM | "" | iSCSI |
|
||||
| *is_locked* | string | Whether you want to lock the snapshot on DSM | 'false' | iSCSI, SMB |
|
||||
| Name | Type | Description | Default |
|
||||
| ------------- | ------ | -------------------------------------------- | ------- |
|
||||
| *description* | string | The description of the snapshot on DSM | "" |
|
||||
| *is_locked* | string | Whether you want to lock the snapshot on DSM | 'false' |
|
||||
|
||||
3. Apply the YAML files to the Kubernetes cluster.
|
||||
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-smb-storage
|
||||
provisioner: csi.san.synology.com
|
||||
parameters:
|
||||
protocol: "smb" # required for smb protocol
|
||||
# Before creating an SMB storage class, you must create a secret and specify the DSM user whom you want to give permissions to.
|
||||
csi.storage.k8s.io/node-stage-secret-name: "cifs-csi-credentials" # required for smb protocol
|
||||
csi.storage.k8s.io/node-stage-secret-namespace: "default" # required for smb protocol
|
||||
# dsm: "1.1.1.1"
|
||||
# location: '/volume1'
|
||||
# mountOptions:
|
||||
# - dir_mode=0777
|
||||
# - file_mode=0777
|
||||
# - uid=0
|
||||
# - gid=0
|
||||
reclaimPolicy: Delete
|
||||
allowVolumeExpansion: true
|
||||
@@ -25,9 +25,6 @@ rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
@@ -35,7 +32,7 @@ rules:
|
||||
resources: ["csinodeinfos"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments", "volumeattachments/status"]
|
||||
resources: ["volumeattachments"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
@@ -46,9 +43,6 @@ rules:
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
@@ -91,7 +85,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
|
||||
image: quay.io/k8scsi/csi-provisioner:v1.6.0
|
||||
args:
|
||||
- --timeout=60s
|
||||
- --csi-address=$(ADDRESS)
|
||||
@@ -109,7 +103,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
|
||||
image: quay.io/k8scsi/csi-attacher:v2.1.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
@@ -126,7 +120,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
|
||||
image: quay.io/k8scsi/csi-resizer:v0.5.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
@@ -143,7 +137,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.1.0
|
||||
image: synology/synology-csi:v1.0.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -63,7 +63,7 @@ spec:
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: Always
|
||||
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
|
||||
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS) # the csi socket path inside the pod
|
||||
@@ -86,7 +86,7 @@ spec:
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: synology/synology-csi:v1.1.0
|
||||
image: synology/synology-csi:v1.0.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -64,7 +64,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
|
||||
image: quay.io/k8scsi/csi-snapshotter:v3.0.3
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
@@ -81,7 +81,7 @@ spec:
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.1.0
|
||||
image: synology/synology-csi:v1.0.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
|
||||
@@ -7,5 +7,5 @@ metadata:
|
||||
driver: csi.san.synology.com
|
||||
deletionPolicy: Delete
|
||||
# parameters:
|
||||
# description: 'Kubernetes CSI' # only for iscsi protocol
|
||||
# description: 'Kubernetes CSI'
|
||||
# is_locked: 'false'
|
||||
@@ -1,168 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-controller-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims/status"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["csi.storage.k8s.io"]
|
||||
resources: ["csinodeinfos"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments", "volumeattachments/status"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-controller-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-controller-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-controller
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
serviceName: "synology-csi-controller"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-controller
|
||||
spec:
|
||||
serviceAccountName: csi-controller-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
|
||||
args:
|
||||
- --timeout=60s
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --v=5
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: csi-attacher
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: csi-resizer
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
@@ -1,9 +0,0 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: CSIDriver
|
||||
metadata:
|
||||
name: csi.san.synology.com
|
||||
spec:
|
||||
attachRequired: true # Indicates the driver requires an attach operation (TODO: ControllerPublishVolume should be implemented)
|
||||
podInfoOnMount: true
|
||||
volumeLifecycleModes:
|
||||
- Persistent
|
||||
@@ -1,4 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: synology-csi
|
||||
@@ -1,139 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-node-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-node-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-node-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-node-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-node-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-node
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-node
|
||||
spec:
|
||||
serviceAccount: csi-node-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-driver-registrar
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: Always
|
||||
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS) # the csi socket path inside the pod
|
||||
- --kubelet-registration-path=$(REGISTRATION_PATH) # the csi socket path on the host node
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /csi/csi.sock
|
||||
- name: REGISTRATION_PATH
|
||||
value: /var/lib/kubelet/plugins/csi.san.synology.com/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /csi
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix://csi/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: kubelet-dir
|
||||
mountPath: /var/lib/kubelet
|
||||
mountPropagation: "Bidirectional"
|
||||
- name: plugin-dir
|
||||
mountPath: /csi
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
- name: host-root
|
||||
mountPath: /host
|
||||
- name: device-dir
|
||||
mountPath: /dev
|
||||
volumes:
|
||||
- name: kubelet-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet
|
||||
type: Directory
|
||||
- name: plugin-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi.san.synology.com/
|
||||
type: DirectoryOrCreate
|
||||
- name: registration-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
- name: host-root
|
||||
hostPath:
|
||||
path: /
|
||||
type: Directory
|
||||
- name: device-dir
|
||||
hostPath:
|
||||
path: /dev
|
||||
type: Directory
|
||||
@@ -1,106 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["create", "get", "list", "watch", "update", "delete"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents/status"]
|
||||
verbs: ["update"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-snapshotter-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
serviceName: "synology-csi-snapshotter"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-snapshotter
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-snapshotter
|
||||
spec:
|
||||
serviceAccountName: csi-snapshotter-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-snapshotter
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: synology/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
@@ -1,11 +0,0 @@
|
||||
apiVersion: snapshot.storage.k8s.io/v1
|
||||
kind: VolumeSnapshotClass
|
||||
metadata:
|
||||
name: synology-snapshotclass
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "false"
|
||||
driver: csi.san.synology.com
|
||||
deletionPolicy: Delete
|
||||
# parameters:
|
||||
# description: 'Kubernetes CSI' # only for iscsi protocol
|
||||
# is_locked: 'false'
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-iscsi-storage
|
||||
# annotations:
|
||||
# storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: csi.san.synology.com
|
||||
# if all params are empty, synology CSI will choose an available location to create volume
|
||||
# parameters:
|
||||
# dsm: '1.1.1.1'
|
||||
# location: '/volume1'
|
||||
# fsType: 'ext4'
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
@@ -1,279 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-controller-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims/status"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["csi.storage.k8s.io"]
|
||||
resources: ["csinodeinfos"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments/status"]
|
||||
verbs: ["patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
# The following rule should be uncommented for plugins that require secrets
|
||||
# for provisioning.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-controller-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-controller-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
# Provisioner must be able to work with endpoints in current namespace
|
||||
# if (and only if) leadership election is enabled
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: synology-csi
|
||||
name: synology-provisioner-cfg
|
||||
rules:
|
||||
# Only one of the following rules for endpoints or leases is required based on
|
||||
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
# Permissions for CSIStorageCapacity are only needed enabling the publishing
|
||||
# of storage capacity information.
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csistoragecapacities"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
# The GET permissions below are needed for walking up the ownership chain
|
||||
# for CSIStorageCapacity. They are sufficient for deployment via
|
||||
# StatefulSet (only needs to get Pod) and Deployment (needs to get
|
||||
# Pod and then ReplicaSet to find the Deployment).
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["replicasets"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["statefulsets"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-provisioner-role-cfg
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: synology-provisioner-cfg
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-controller
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
serviceName: "synology-csi-controller"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-controller
|
||||
spec:
|
||||
serviceAccountName: csi-controller-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v3.0.0
|
||||
args:
|
||||
- --timeout=60s
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
# - --enable-capacity
|
||||
- --http-endpoint=:8080
|
||||
- --v=1
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: prov-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: prov-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-attacher
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: gcr.io/k8s-staging-sig-storage/csi-attacher:v3.4.0
|
||||
args:
|
||||
- --v=1
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
- --http-endpoint=:8081
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: MY_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8081
|
||||
name: attach-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: attach-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-resizer
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: gcr.io/k8s-staging-sig-storage/csi-resizer:v1.3.0
|
||||
args:
|
||||
- --v=1
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
- --http-endpoint=:8082
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8082
|
||||
name: resizer-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: resizer-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: cristicalin/synology-csi:v1.0.0
|
||||
args:
|
||||
- --nodeid=NotUsed
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
@@ -1,9 +0,0 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: CSIDriver
|
||||
metadata:
|
||||
name: csi.san.synology.com
|
||||
spec:
|
||||
attachRequired: true # Indicates the driver requires an attach operation (TODO: ControllerPublishVolume should be implemented)
|
||||
podInfoOnMount: true
|
||||
volumeLifecycleModes:
|
||||
- Persistent
|
||||
@@ -1,4 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: synology-csi
|
||||
@@ -1,139 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-node-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-node-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-node-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-node-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-node-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-node
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-node
|
||||
spec:
|
||||
serviceAccount: csi-node-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-driver-registrar
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: Always
|
||||
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS) # the csi socket path inside the pod
|
||||
- --kubelet-registration-path=$(REGISTRATION_PATH) # the csi socket path on the host node
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /csi/csi.sock
|
||||
- name: REGISTRATION_PATH
|
||||
value: /var/lib/kubelet/plugins/csi.san.synology.com/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /csi
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: cristicalin/synology-csi:v1.0.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix://csi/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: kubelet-dir
|
||||
mountPath: /var/lib/kubelet
|
||||
mountPropagation: "Bidirectional"
|
||||
- name: plugin-dir
|
||||
mountPath: /csi
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
- name: host-root
|
||||
mountPath: /host
|
||||
- name: device-dir
|
||||
mountPath: /dev
|
||||
volumes:
|
||||
- name: kubelet-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet
|
||||
type: Directory
|
||||
- name: plugin-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi.san.synology.com/
|
||||
type: DirectoryOrCreate
|
||||
- name: registration-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
- name: host-root
|
||||
hostPath:
|
||||
path: /
|
||||
type: Directory
|
||||
- name: device-dir
|
||||
hostPath:
|
||||
path: /dev
|
||||
type: Directory
|
||||
@@ -1,157 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["create", "get", "list", "watch", "update", "delete"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents/status"]
|
||||
verbs: ["update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-snapshotter-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: synology-csi
|
||||
name: synology-csi-snapshotter-cfg
|
||||
rules:
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: synology-csi
|
||||
name: synology-csi-snapshotter-role-cfg
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: synology-csi-snapshotter-cfg
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
serviceName: "synology-csi-snapshotter"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-snapshotter
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-snapshotter
|
||||
spec:
|
||||
serviceAccountName: csi-snapshotter-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-snapshotter
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
|
||||
args:
|
||||
- --v=1
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
- --http-endpoint=:8083
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8083
|
||||
name: snap-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: snap-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: cristicalin/synology-csi:v1.0.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
@@ -1,13 +0,0 @@
|
||||
apiVersion: snapshot.storage.k8s.io/v1beta1
|
||||
kind: VolumeSnapshotClass
|
||||
metadata:
|
||||
name: synology-snapshotclass
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "false"
|
||||
labels:
|
||||
velero.io/csi-volumesnapshot-class: "true"
|
||||
driver: csi.san.synology.com
|
||||
deletionPolicy: Retain
|
||||
# parameters:
|
||||
# description: 'Kubernetes CSI'
|
||||
# is_locked: 'false'
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-iscsi-storage
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: csi.san.synology.com
|
||||
# if all params are empty, synology CSI will choose an available location to create volume
|
||||
# parameters:
|
||||
# dsm: '1.1.1.1'
|
||||
# location: '/volume1'
|
||||
# fsType: 'ext4'
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
@@ -1,281 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-controller-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumeclaims/status"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csinodes"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["csi.storage.k8s.io"]
|
||||
resources: ["csinodeinfos"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments", "volumeattachments/status"]
|
||||
verbs: ["get", "list", "watch", "update", "patch"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["storageclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["get", "list"]
|
||||
# The following rule should be uncommented for plugins that require secrets
|
||||
# for provisioning.
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-controller-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-controller-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
# Provisioner must be able to work with endpoints in current namespace
|
||||
# if (and only if) leadership election is enabled
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: synology-csi
|
||||
name: synology-provisioner-cfg
|
||||
rules:
|
||||
# Only one of the following rules for endpoints or leases is required based on
|
||||
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
|
||||
- apiGroups: [""]
|
||||
resources: ["endpoints"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
# Permissions for CSIStorageCapacity are only needed enabling the publishing
|
||||
# of storage capacity information.
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["csistoragecapacities"]
|
||||
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
|
||||
# The GET permissions below are needed for walking up the ownership chain
|
||||
# for CSIStorageCapacity. They are sufficient for deployment via
|
||||
# StatefulSet (only needs to get Pod) and Deployment (needs to get
|
||||
# Pod and then ReplicaSet to find the Deployment).
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["replicasets"]
|
||||
verbs: ["get"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["statefulsets"]
|
||||
verbs: ["get"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-provisioner-role-cfg
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-controller-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: synology-provisioner-cfg
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-controller
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
serviceName: "synology-csi-controller"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-controller
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-controller
|
||||
spec:
|
||||
serviceAccountName: csi-controller-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-provisioner
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
|
||||
args:
|
||||
- --timeout=60s
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
# - --enable-capacity
|
||||
- --http-endpoint=:8080
|
||||
- --v=5
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: prov-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: prov-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-attacher
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
- --http-endpoint=:8081
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: MY_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8081
|
||||
name: attach-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: attach-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-resizer
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
- --http-endpoint=:8082
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8082
|
||||
name: resizer-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: resizer-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: cristicalin/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
@@ -1,11 +0,0 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: CSIDriver
|
||||
metadata:
|
||||
name: csi.san.synology.com
|
||||
spec:
|
||||
attachRequired: true # Indicates the driver requires an attach operation (TODO: ControllerPublishVolume should be implemented)
|
||||
podInfoOnMount: true
|
||||
# fsGroupPolicy: File (see https://kubernetes-csi.github.io/docs/support-fsgroup.html#supported-modes)
|
||||
fsGroupPolicy: ReadWriteOnceWithFSType
|
||||
volumeLifecycleModes:
|
||||
- Persistent
|
||||
@@ -1,4 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: synology-csi
|
||||
@@ -1,150 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-node-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-node-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["get", "list", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["namespaces"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: [""]
|
||||
resources: ["persistentvolumes"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
- apiGroups: ["storage.k8s.io"]
|
||||
resources: ["volumeattachments"]
|
||||
verbs: ["get", "list", "watch", "update"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-node-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-node-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-node-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-node
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-node
|
||||
spec:
|
||||
serviceAccount: csi-node-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-driver-registrar
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: Always
|
||||
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.1
|
||||
# image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS) # the csi socket path inside the pod
|
||||
- --kubelet-registration-path=$(REGISTRATION_PATH) # the csi socket path on the host node
|
||||
- --health-port=9809
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /csi/csi.sock
|
||||
- name: REGISTRATION_PATH
|
||||
value: /var/lib/kubelet/plugins/csi.san.synology.com/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: plugin-dir
|
||||
mountPath: /csi
|
||||
- name: registration-dir
|
||||
mountPath: /registration
|
||||
ports:
|
||||
- containerPort: 9809
|
||||
name: healthz
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: healthz
|
||||
initialDelaySeconds: 5
|
||||
timeoutSeconds: 5
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
imagePullPolicy: IfNotPresent
|
||||
image: cristicalin/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix://csi/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- name: kubelet-dir
|
||||
mountPath: /var/lib/kubelet
|
||||
mountPropagation: "Bidirectional"
|
||||
- name: plugin-dir
|
||||
mountPath: /csi
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
- name: host-root
|
||||
mountPath: /host
|
||||
- name: device-dir
|
||||
mountPath: /dev
|
||||
volumes:
|
||||
- name: kubelet-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet
|
||||
type: Directory
|
||||
- name: plugin-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins/csi.san.synology.com/
|
||||
type: DirectoryOrCreate
|
||||
- name: registration-dir
|
||||
hostPath:
|
||||
path: /var/lib/kubelet/plugins_registry
|
||||
type: Directory
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
- name: host-root
|
||||
hostPath:
|
||||
path: /
|
||||
type: Directory
|
||||
- name: device-dir
|
||||
hostPath:
|
||||
path: /dev
|
||||
type: Directory
|
||||
@@ -1,157 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["events"]
|
||||
verbs: ["list", "watch", "create", "update", "patch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshots"]
|
||||
verbs: ["get", "list"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotclasses"]
|
||||
verbs: ["get", "list", "watch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents"]
|
||||
verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
|
||||
- apiGroups: ["snapshot.storage.k8s.io"]
|
||||
resources: ["volumesnapshotcontents/status"]
|
||||
verbs: ["update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["secrets"]
|
||||
verbs: ["get", "list"]
|
||||
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter-role
|
||||
namespace: synology-csi
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: synology-csi-snapshotter-role
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: Role
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: synology-csi
|
||||
name: synology-csi-snapshotter-cfg
|
||||
rules:
|
||||
- apiGroups: ["coordination.k8s.io"]
|
||||
resources: ["leases"]
|
||||
verbs: ["get", "watch", "list", "delete", "update", "create"]
|
||||
|
||||
---
|
||||
kind: RoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
namespace: synology-csi
|
||||
name: synology-csi-snapshotter-role-cfg
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: csi-snapshotter-sa
|
||||
namespace: synology-csi
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: synology-csi-snapshotter-cfg
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
|
||||
---
|
||||
kind: StatefulSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: synology-csi-snapshotter
|
||||
namespace: synology-csi
|
||||
spec:
|
||||
serviceName: "synology-csi-snapshotter"
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: synology-csi-snapshotter
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: synology-csi-snapshotter
|
||||
spec:
|
||||
serviceAccountName: csi-snapshotter-sa
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: csi-snapshotter
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
|
||||
args:
|
||||
- --v=5
|
||||
- --csi-address=$(ADDRESS)
|
||||
- --leader-election
|
||||
- --http-endpoint=:8083
|
||||
env:
|
||||
- name: ADDRESS
|
||||
value: /var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
imagePullPolicy: Always
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
ports:
|
||||
- containerPort: 8083
|
||||
name: snap-port
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
failureThreshold: 1
|
||||
httpGet:
|
||||
path: /healthz/leader-election
|
||||
port: snap-port
|
||||
initialDelaySeconds: 10
|
||||
timeoutSeconds: 10
|
||||
periodSeconds: 20
|
||||
- name: csi-plugin
|
||||
securityContext:
|
||||
privileged: true
|
||||
capabilities:
|
||||
add: ["SYS_ADMIN"]
|
||||
allowPrivilegeEscalation: true
|
||||
image: cristicalin/synology-csi:v1.1.0
|
||||
args:
|
||||
- --nodeid=$(KUBE_NODE_NAME)
|
||||
- --endpoint=$(CSI_ENDPOINT)
|
||||
- --client-info
|
||||
- /etc/synology/client-info.yml
|
||||
- --log-level=info
|
||||
env:
|
||||
- name: CSI_ENDPOINT
|
||||
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
|
||||
- name: KUBE_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
imagePullPolicy: IfNotPresent
|
||||
volumeMounts:
|
||||
- name: socket-dir
|
||||
mountPath: /var/lib/csi/sockets/pluginproxy/
|
||||
- name: client-info
|
||||
mountPath: /etc/synology
|
||||
readOnly: true
|
||||
volumes:
|
||||
- name: socket-dir
|
||||
emptyDir: {}
|
||||
- name: client-info
|
||||
secret:
|
||||
secretName: client-info-secret
|
||||
@@ -1,13 +0,0 @@
|
||||
apiVersion: snapshot.storage.k8s.io/v1
|
||||
kind: VolumeSnapshotClass
|
||||
metadata:
|
||||
name: synology-snapshotclass
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "false"
|
||||
labels:
|
||||
velero.io/csi-volumesnapshot-class: "true"
|
||||
driver: csi.san.synology.com
|
||||
deletionPolicy: Retain
|
||||
# parameters:
|
||||
# description: 'Kubernetes CSI'
|
||||
# is_locked: 'false'
|
||||
@@ -1,14 +0,0 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-iscsi-storage
|
||||
annotations:
|
||||
storageclass.kubernetes.io/is-default-class: "true"
|
||||
provisioner: csi.san.synology.com
|
||||
# if all params are empty, synology CSI will choose an available location to create volume
|
||||
# parameters:
|
||||
# dsm: '1.1.1.1'
|
||||
# location: '/volume1'
|
||||
# fsType: 'ext4'
|
||||
reclaimPolicy: Retain
|
||||
allowVolumeExpansion: true
|
||||
1
go.mod
1
go.mod
@@ -17,7 +17,6 @@ require (
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af // indirect
|
||||
google.golang.org/grpc v1.40.0
|
||||
google.golang.org/protobuf v1.27.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
k8s.io/klog/v2 v2.20.0 // indirect
|
||||
k8s.io/mount-utils v0.22.1
|
||||
|
||||
@@ -19,17 +19,15 @@ package driver
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"time"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/models"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
@@ -120,56 +118,36 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
|
||||
isThin = utils.StringToBoolean(params["thin_provisioning"])
|
||||
}
|
||||
|
||||
protocol := strings.ToLower(params["protocol"])
|
||||
if protocol == "" {
|
||||
protocol = utils.ProtocolDefault
|
||||
} else if !isProtocolSupport(protocol) {
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported volume protocol")
|
||||
}
|
||||
|
||||
spec := &models.CreateK8sVolumeSpec{
|
||||
DsmIp: params["dsm"],
|
||||
K8sVolumeName: volName,
|
||||
LunName: models.GenLunName(volName),
|
||||
ShareName: models.GenShareName(volName),
|
||||
LunName: fmt.Sprintf("%s-%s", models.LunPrefix, volName),
|
||||
Location: params["location"],
|
||||
Size: sizeInByte,
|
||||
Type: params["type"],
|
||||
ThinProvisioning: isThin,
|
||||
TargetName: fmt.Sprintf("%s-%s", models.TargetPrefix, volName),
|
||||
TargetName: fmt.Sprintf("%s-%s", models.LunPrefix, volName),
|
||||
MultipleSession: multiSession,
|
||||
SourceSnapshotId: srcSnapshotId,
|
||||
SourceVolumeId: srcVolumeId,
|
||||
Protocol: protocol,
|
||||
}
|
||||
|
||||
// idempotency
|
||||
// Note: an SMB PV may not be tested existed precisely because the share folder name was sliced from k8sVolumeName
|
||||
k8sVolume := cs.dsmService.GetVolumeByName(volName)
|
||||
if k8sVolume == nil {
|
||||
k8sVolume, err = cs.dsmService.CreateVolume(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// already existed
|
||||
log.Debugf("Volume [%s] already exists in [%s], backing name: [%s]", volName, k8sVolume.DsmIp, k8sVolume.Name)
|
||||
lunInfo, dsmIp, err := cs.dsmService.CreateVolume(spec)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if (k8sVolume.Protocol == utils.ProtocolIscsi && k8sVolume.SizeInBytes != sizeInByte) ||
|
||||
(k8sVolume.Protocol == utils.ProtocolSmb && utils.BytesToMB(k8sVolume.SizeInBytes) != utils.BytesToMBCeil(sizeInByte)) {
|
||||
if int64(lunInfo.Size) != sizeInByte {
|
||||
return nil , status.Errorf(codes.AlreadyExists, "Already existing volume name with different capacity")
|
||||
}
|
||||
|
||||
return &csi.CreateVolumeResponse{
|
||||
Volume: &csi.Volume{
|
||||
VolumeId: k8sVolume.VolumeId,
|
||||
CapacityBytes: k8sVolume.SizeInBytes,
|
||||
VolumeId: lunInfo.Uuid,
|
||||
CapacityBytes: int64(lunInfo.Size),
|
||||
ContentSource: volContentSrc,
|
||||
VolumeContext: map[string]string{
|
||||
"dsm": k8sVolume.DsmIp,
|
||||
"protocol": k8sVolume.Protocol,
|
||||
"source": k8sVolume.Source,
|
||||
"dsm": dsmIp,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
@@ -234,11 +212,9 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
|
||||
pagingSkip := ("" != startingToken)
|
||||
infos := cs.dsmService.ListVolumes()
|
||||
|
||||
sort.Sort(models.ByVolumeId(infos))
|
||||
|
||||
var count int32 = 0
|
||||
for _, info := range infos {
|
||||
if info.VolumeId == startingToken {
|
||||
if info.Lun.Uuid == startingToken {
|
||||
pagingSkip = false
|
||||
}
|
||||
|
||||
@@ -247,20 +223,18 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
|
||||
}
|
||||
|
||||
if maxEntries > 0 && count >= maxEntries {
|
||||
nextToken = info.VolumeId
|
||||
nextToken = info.Lun.Uuid
|
||||
break
|
||||
}
|
||||
|
||||
entries = append(entries, &csi.ListVolumesResponse_Entry{
|
||||
Volume: &csi.Volume{
|
||||
VolumeId: info.VolumeId,
|
||||
CapacityBytes: info.SizeInBytes,
|
||||
VolumeId: info.Lun.Uuid,
|
||||
CapacityBytes: int64(info.Lun.Size),
|
||||
VolumeContext: map[string]string{
|
||||
"dsm": info.DsmIp,
|
||||
"lunName": info.Lun.Name,
|
||||
"targetIqn": info.Target.Iqn,
|
||||
"shareName": info.Share.Name,
|
||||
"protocol": info.Protocol,
|
||||
},
|
||||
},
|
||||
})
|
||||
@@ -316,7 +290,7 @@ func (cs *controllerServer) ControllerGetCapabilities(ctx context.Context, req *
|
||||
|
||||
func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
|
||||
srcVolId := req.GetSourceVolumeId()
|
||||
snapshotName := req.GetName() // snapshot-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
|
||||
snapshotName := req.GetName()
|
||||
params := req.GetParameters()
|
||||
|
||||
if srcVolId == "" {
|
||||
@@ -327,28 +301,37 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
|
||||
return nil, status.Error(codes.InvalidArgument, "Snapshot name is empty.")
|
||||
}
|
||||
|
||||
// idempotency
|
||||
orgSnap := cs.dsmService.GetSnapshotByName(snapshotName)
|
||||
if orgSnap != nil {
|
||||
// already existed
|
||||
if orgSnap.ParentUuid != srcVolId {
|
||||
return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Snapshot [%s] already exists but volume id is incompatible", snapshotName))
|
||||
}
|
||||
if orgSnap.CreateTime < 0 {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Bad create time: %v", orgSnap.CreateTime))
|
||||
}
|
||||
return &csi.CreateSnapshotResponse{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: orgSnap.SizeInBytes,
|
||||
SnapshotId: orgSnap.Uuid,
|
||||
SourceVolumeId: orgSnap.ParentUuid,
|
||||
CreationTime: timestamppb.New(time.Unix(orgSnap.CreateTime, 0)),
|
||||
ReadyToUse: (orgSnap.Status == "Healthy"),
|
||||
},
|
||||
}, nil
|
||||
snapshotInfos, err := cs.dsmService.ListAllSnapshots()
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ListAllSnapshots(), err: %v", err))
|
||||
}
|
||||
|
||||
// idempotency
|
||||
for _, snapshotInfo := range snapshotInfos {
|
||||
if snapshotInfo.Name == snapshotName {
|
||||
if snapshotInfo.ParentUuid != srcVolId {
|
||||
return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Snapshot [%s] already exists but volume id is incompatible", snapshotName))
|
||||
}
|
||||
|
||||
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
|
||||
}
|
||||
|
||||
return &csi.CreateSnapshotResponse{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: snapshotInfo.TotalSize,
|
||||
SnapshotId: snapshotInfo.Uuid,
|
||||
SourceVolumeId: snapshotInfo.ParentUuid,
|
||||
CreationTime: createTime,
|
||||
ReadyToUse: (snapshotInfo.Status == "Healthy"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// not exist, going to create a new snapshot
|
||||
spec := &models.CreateK8sVolumeSnapshotSpec{
|
||||
K8sVolumeId: srcVolId,
|
||||
SnapshotName: snapshotName,
|
||||
@@ -357,19 +340,35 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
|
||||
IsLocked: utils.StringToBoolean(params["is_locked"]),
|
||||
}
|
||||
|
||||
snapshot, err := cs.dsmService.CreateSnapshot(spec)
|
||||
snapshotId, err := cs.dsmService.CreateSnapshot(spec)
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("Failed to CreateSnapshot, snapshotName: %s, srcVolId: %s, err: %v", snapshotName, srcVolId, err)
|
||||
return nil, err
|
||||
if err == utils.OutOfFreeSpaceError("") || err == utils.SnapshotReachMaxCountError("") {
|
||||
return nil,status.Errorf(codes.ResourceExhausted, fmt.Sprintf("Failed to CreateSnapshot(%s), err: %v", srcVolId, err))
|
||||
} else {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to CreateSnapshot(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
}
|
||||
|
||||
snapshotInfo, err := cs.dsmService.GetSnapshot(snapshotId)
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to GetSnapshot(%s), err: %v", snapshotId, err))
|
||||
}
|
||||
|
||||
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
|
||||
}
|
||||
|
||||
return &csi.CreateSnapshotResponse{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: snapshot.SizeInBytes,
|
||||
SnapshotId: snapshot.Uuid,
|
||||
SourceVolumeId: snapshot.ParentUuid,
|
||||
CreationTime: timestamppb.New(time.Unix(snapshot.CreateTime, 0)),
|
||||
ReadyToUse: (snapshot.Status == "Healthy"),
|
||||
SizeBytes: snapshotInfo.TotalSize,
|
||||
SnapshotId: snapshotInfo.Uuid,
|
||||
SourceVolumeId: snapshotInfo.ParentUuid,
|
||||
CreationTime: createTime,
|
||||
ReadyToUse: (snapshotInfo.Status == "Healthy"),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -403,19 +402,22 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
|
||||
}
|
||||
|
||||
pagingSkip := ("" != startingToken)
|
||||
var snapshots []*models.K8sSnapshotRespSpec
|
||||
var snapshotInfos []webapi.SnapshotInfo
|
||||
var err error
|
||||
|
||||
if (srcVolId != "") {
|
||||
snapshots = cs.dsmService.ListSnapshots(srcVolId)
|
||||
snapshotInfos, err = cs.dsmService.ListSnapshots(srcVolId)
|
||||
} else {
|
||||
snapshots = cs.dsmService.ListAllSnapshots()
|
||||
snapshotInfos, err = cs.dsmService.ListAllSnapshots()
|
||||
}
|
||||
|
||||
sort.Sort(models.BySnapshotAndParentUuid(snapshots))
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ListSnapshots(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
|
||||
var count int32 = 0
|
||||
for _, snapshot := range snapshots {
|
||||
if snapshot.Uuid == startingToken {
|
||||
for _, snapshotInfo := range snapshotInfos {
|
||||
if snapshotInfo.Uuid == startingToken {
|
||||
pagingSkip = false
|
||||
}
|
||||
|
||||
@@ -423,21 +425,28 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
|
||||
continue
|
||||
}
|
||||
|
||||
if snapshotId != "" && snapshot.Uuid != snapshotId {
|
||||
if snapshotId != "" && snapshotInfo.Uuid != snapshotId {
|
||||
continue
|
||||
}
|
||||
|
||||
if maxEntries > 0 && count >= maxEntries {
|
||||
nextToken = snapshot.Uuid
|
||||
nextToken = snapshotInfo.Uuid
|
||||
break
|
||||
}
|
||||
|
||||
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
|
||||
}
|
||||
|
||||
entries = append(entries, &csi.ListSnapshotsResponse_Entry{
|
||||
Snapshot: &csi.Snapshot{
|
||||
SizeBytes: snapshot.SizeInBytes,
|
||||
SnapshotId: snapshot.Uuid,
|
||||
SourceVolumeId: snapshot.ParentUuid,
|
||||
CreationTime: timestamppb.New(time.Unix(snapshot.CreateTime, 0)),
|
||||
ReadyToUse: (snapshot.Status == "Healthy"),
|
||||
SizeBytes: snapshotInfo.TotalSize,
|
||||
SnapshotId: snapshotInfo.Uuid,
|
||||
SourceVolumeId: snapshotInfo.ParentUuid,
|
||||
CreationTime: createTime,
|
||||
ReadyToUse: (snapshotInfo.Status == "Healthy"),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -468,14 +477,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi
|
||||
"InvalidArgument: Please check CapacityRange[%v]", capRange)
|
||||
}
|
||||
|
||||
k8sVolume, err := cs.dsmService.ExpandVolume(volumeId, sizeInByte)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if err := cs.dsmService.ExpandLun(volumeId, sizeInByte); err != nil {
|
||||
return nil, status.Error(codes.Internal,
|
||||
fmt.Sprintf("Failed to expand volume [%s], err: %v", volumeId, err))
|
||||
}
|
||||
|
||||
return &csi.ControllerExpandVolumeResponse{
|
||||
CapacityBytes: k8sVolume.SizeInBytes,
|
||||
NodeExpansionRequired: (k8sVolume.Protocol == utils.ProtocolIscsi),
|
||||
CapacityBytes: sizeInByte,
|
||||
NodeExpansionRequired: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -20,16 +20,11 @@ import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
DriverName = "csi.san.synology.com" // CSI dirver name
|
||||
DriverVersion = "1.1.0"
|
||||
)
|
||||
|
||||
var (
|
||||
supportedProtocolList = []string{utils.ProtocolIscsi, utils.ProtocolSmb}
|
||||
DriverVersion = "1.0.0"
|
||||
)
|
||||
|
||||
type IDriver interface {
|
||||
@@ -78,8 +73,6 @@ func NewControllerAndNodeDriver(nodeID string, endpoint string, dsmService inter
|
||||
d.addNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{
|
||||
csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
|
||||
csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
|
||||
csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
|
||||
// csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, //TODO
|
||||
})
|
||||
|
||||
log.Infof("New driver created: name=%s, nodeID=%s, version=%s, endpoint=%s", d.name, d.nodeID, d.version, d.endpoint)
|
||||
@@ -134,7 +127,3 @@ func (d *Driver) addNodeServiceCapabilities(nsc []csi.NodeServiceCapability_RPC_
|
||||
func (d *Driver) getVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode { // for debugging
|
||||
return d.vCap
|
||||
}
|
||||
|
||||
func isProtocolSupport(protocol string) bool {
|
||||
return utils.SliceContains(supportedProtocolList, protocol)
|
||||
}
|
||||
@@ -23,22 +23,19 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff/v4"
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/mount-utils"
|
||||
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/models"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
)
|
||||
|
||||
type nodeServer struct {
|
||||
Driver *Driver
|
||||
Mounter *mount.SafeFormatAndMount
|
||||
Driver *Driver
|
||||
Mounter *mount.SafeFormatAndMount
|
||||
dsmService interfaces.IDsmService
|
||||
Initiator *initiatorDriver
|
||||
}
|
||||
@@ -125,7 +122,7 @@ func createTargetMountPath(mounter mount.Interface, mountPath string, isBlock bo
|
||||
}
|
||||
|
||||
func (ns *nodeServer) loginTarget(volumeId string) error {
|
||||
k8sVolume := ns.dsmService.GetVolume(volumeId)
|
||||
k8sVolume := ns.dsmService.GetVolume(volumeId);
|
||||
|
||||
if k8sVolume == nil {
|
||||
return status.Error(codes.NotFound, fmt.Sprintf("Volume[%s] is not found", volumeId))
|
||||
@@ -142,182 +139,13 @@ func (ns *nodeServer) loginTarget(volumeId string) error {
|
||||
func (ns *nodeServer) logoutTarget(volumeId string) {
|
||||
k8sVolume := ns.dsmService.GetVolume(volumeId)
|
||||
|
||||
if k8sVolume == nil || k8sVolume.Protocol != utils.ProtocolIscsi {
|
||||
if k8sVolume == nil {
|
||||
return
|
||||
}
|
||||
|
||||
ns.Initiator.logout(k8sVolume.Target.Iqn, k8sVolume.DsmIp)
|
||||
}
|
||||
|
||||
func checkGidPresentInMountFlags(volumeMountGroup string, mountFlags []string) (bool, error) {
|
||||
gidPresentInMountFlags := false
|
||||
for _, mountFlag := range mountFlags {
|
||||
if strings.HasPrefix(mountFlag, "gid") {
|
||||
gidPresentInMountFlags = true
|
||||
kvpair := strings.Split(mountFlag, "=")
|
||||
if volumeMountGroup != "" && len(kvpair) == 2 && !strings.EqualFold(volumeMountGroup, kvpair[1]) {
|
||||
return false, status.Error(codes.InvalidArgument, fmt.Sprintf("gid(%s) in storageClass and pod fsgroup(%s) are not equal", kvpair[1], volumeMountGroup))
|
||||
}
|
||||
}
|
||||
}
|
||||
return gidPresentInMountFlags, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) mountSensitiveWithRetry(sourcePath string, targetPath string, fsType string, options []string, sensitiveOptions []string) error {
|
||||
mountBackoff := backoff.NewExponentialBackOff()
|
||||
mountBackoff.InitialInterval = 1 * time.Second
|
||||
mountBackoff.Multiplier = 2
|
||||
mountBackoff.RandomizationFactor = 0.1
|
||||
mountBackoff.MaxElapsedTime = 5 * time.Second
|
||||
|
||||
checkFinished := func() error {
|
||||
if err := ns.Mounter.MountSensitive(sourcePath, targetPath, fsType, options, sensitiveOptions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
mountNotify := func(err error, duration time.Duration) {
|
||||
log.Infof("Retry MountSensitive, waiting %3.2f seconds .....", float64(duration.Seconds()))
|
||||
}
|
||||
|
||||
if err := backoff.RetryNotify(checkFinished, mountBackoff, mountNotify); err != nil {
|
||||
log.Errorf("Could not finish mount after %3.2f seconds.", float64(mountBackoff.MaxElapsedTime.Seconds()))
|
||||
return err
|
||||
}
|
||||
|
||||
log.Debugf("Mount successfully. source: %s, target: %s", sourcePath, targetPath)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) setSMBVolumePermission(sourcePath string, userName string, authType utils.AuthType) error {
|
||||
s := strings.Split(strings.TrimPrefix(sourcePath, "//"), "/")
|
||||
if len(s) != 2 {
|
||||
return fmt.Errorf("Failed to parse dsmIp and shareName from source path")
|
||||
}
|
||||
dsmIp, shareName := s[0], s[1]
|
||||
|
||||
dsm, err := ns.dsmService.GetDsm(dsmIp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to get DSM[%s]", dsmIp)
|
||||
}
|
||||
|
||||
permission := webapi.SharePermission{
|
||||
Name: userName,
|
||||
}
|
||||
switch authType {
|
||||
case utils.AuthTypeReadWrite:
|
||||
permission.IsWritable = true
|
||||
case utils.AuthTypeReadOnly:
|
||||
permission.IsReadonly = true
|
||||
case utils.AuthTypeNoAccess:
|
||||
permission.IsDeny = true
|
||||
default:
|
||||
return fmt.Errorf("Unknown auth type: %s", string(authType))
|
||||
}
|
||||
|
||||
permissions := append([]*webapi.SharePermission{}, &permission)
|
||||
spec := webapi.SharePermissionSetSpec{
|
||||
Name: shareName,
|
||||
UserGroupType: models.UserGroupTypeLocalUser,
|
||||
Permissions: permissions,
|
||||
}
|
||||
|
||||
return dsm.SharePermissionSet(spec)
|
||||
}
|
||||
|
||||
func (ns *nodeServer) nodeStageISCSIVolume(ctx context.Context, spec *models.NodeStageVolumeSpec) (*csi.NodeStageVolumeResponse, error) {
|
||||
// if block mode, skip mount
|
||||
if spec.VolumeCapability.GetBlock() != nil {
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
if err := ns.loginTarget(spec.VolumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
volumeMountPath := ns.getVolumeMountPath(spec.VolumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
notMount, err := ns.Mounter.Interface.IsLikelyNotMountPoint(spec.StagingTargetPath)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
if !notMount {
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
fsType := spec.VolumeCapability.GetMount().GetFsType()
|
||||
options := append([]string{"rw"}, spec.VolumeCapability.GetMount().GetMountFlags()...)
|
||||
|
||||
if err = ns.Mounter.FormatAndMount(volumeMountPath, spec.StagingTargetPath, fsType, options); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) nodeStageSMBVolume(ctx context.Context, spec *models.NodeStageVolumeSpec, secrets map[string]string) (*csi.NodeStageVolumeResponse, error) {
|
||||
if spec.VolumeCapability.GetBlock() != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("SMB protocol only allows 'mount' access type"))
|
||||
}
|
||||
|
||||
if spec.Source == "" { //"//<host>/<shareName>"
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Missing 'source' field"))
|
||||
}
|
||||
|
||||
if secrets == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Missing secrets for node staging volume"))
|
||||
}
|
||||
|
||||
username := strings.TrimSpace(secrets["username"])
|
||||
password := strings.TrimSpace(secrets["password"])
|
||||
domain := strings.TrimSpace(secrets["domain"])
|
||||
|
||||
// set permission to access the share
|
||||
if err := ns.setSMBVolumePermission(spec.Source, username, utils.AuthTypeReadWrite); err != nil {
|
||||
return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to set permission, source: %s, err: %v", spec.Source, err))
|
||||
}
|
||||
|
||||
// create mount point if not exists
|
||||
targetPath := spec.StagingTargetPath
|
||||
notMount, err := createTargetMountPath(ns.Mounter.Interface, targetPath, false)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
if !notMount {
|
||||
log.Infof("NodeStageVolume: %s is already mounted", targetPath)
|
||||
return &csi.NodeStageVolumeResponse{}, nil // already mount
|
||||
}
|
||||
|
||||
fsType := "cifs"
|
||||
options := spec.VolumeCapability.GetMount().GetMountFlags()
|
||||
|
||||
volumeMountGroup := spec.VolumeCapability.GetMount().GetVolumeMountGroup()
|
||||
gidPresent, err := checkGidPresentInMountFlags(volumeMountGroup, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !gidPresent && volumeMountGroup != "" {
|
||||
options = append(options, fmt.Sprintf("gid=%s", volumeMountGroup))
|
||||
}
|
||||
|
||||
|
||||
if domain != "" {
|
||||
options = append(options, fmt.Sprintf("%s=%s", "domain", domain))
|
||||
}
|
||||
var sensitiveOptions = []string{fmt.Sprintf("%s=%s,%s=%s", "username", username, "password", password)}
|
||||
if err := ns.mountSensitiveWithRetry(spec.Source, targetPath, fsType, options, sensitiveOptions); err != nil {
|
||||
return nil, status.Error(codes.Internal,
|
||||
fmt.Sprintf("Volume[%s] failed to mount %q on %q. err: %v", spec.VolumeId, spec.Source, targetPath, err))
|
||||
}
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
|
||||
volumeId, stagingTargetPath, volumeCapability :=
|
||||
req.GetVolumeId(), req.GetStagingTargetPath(), req.GetVolumeCapability()
|
||||
@@ -327,32 +155,47 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
|
||||
"InvalidArgument: Please check volume ID, staging target path and volume capability.")
|
||||
}
|
||||
|
||||
if volumeCapability.GetBlock() != nil && volumeCapability.GetMount() != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "Cannot mix block and mount capabilities")
|
||||
// if block mode, skip mount
|
||||
if volumeCapability.GetBlock() != nil {
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
spec := &models.NodeStageVolumeSpec{
|
||||
VolumeId: volumeId,
|
||||
StagingTargetPath: stagingTargetPath,
|
||||
VolumeCapability: volumeCapability,
|
||||
Dsm: req.VolumeContext["dsm"],
|
||||
Source: req.VolumeContext["source"], // filled by CreateVolume response
|
||||
if err := ns.loginTarget(volumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
switch req.VolumeContext["protocol"] {
|
||||
case utils.ProtocolSmb:
|
||||
return ns.nodeStageSMBVolume(ctx, spec, req.GetSecrets())
|
||||
default:
|
||||
return ns.nodeStageISCSIVolume(ctx, spec)
|
||||
volumeMountPath := ns.getVolumeMountPath(volumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
notMount, err := ns.Mounter.Interface.IsLikelyNotMountPoint(stagingTargetPath)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
if !notMount {
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
fsType := volumeCapability.GetMount().GetFsType()
|
||||
mountFlags := volumeCapability.GetMount().GetMountFlags()
|
||||
options := append([]string{"rw"}, mountFlags...)
|
||||
|
||||
if err = ns.Mounter.FormatAndMount(volumeMountPath, stagingTargetPath, fsType, options); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
|
||||
volumeID, stagingTargetPath := req.GetVolumeId(), req.GetStagingTargetPath()
|
||||
|
||||
if volumeID == "" {
|
||||
if req.GetVolumeId() == "" { // Useless, just for sanity check
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
|
||||
}
|
||||
|
||||
stagingTargetPath := req.GetStagingTargetPath()
|
||||
|
||||
if stagingTargetPath == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
|
||||
}
|
||||
@@ -368,26 +211,18 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
|
||||
}
|
||||
}
|
||||
|
||||
ns.logoutTarget(volumeID)
|
||||
|
||||
return &csi.NodeUnstageVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
|
||||
volumeId, targetPath, stagingTargetPath := req.GetVolumeId(), req.GetTargetPath(), req.GetStagingTargetPath()
|
||||
isBlock := req.GetVolumeCapability().GetBlock() != nil
|
||||
|
||||
if volumeId == "" || targetPath == "" || stagingTargetPath == "" {
|
||||
return nil, status.Error(codes.InvalidArgument,
|
||||
"InvalidArgument: Please check volume ID, target path and staging target path.")
|
||||
}
|
||||
|
||||
if req.GetVolumeCapability() == nil {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
|
||||
}
|
||||
|
||||
isBlock := req.GetVolumeCapability().GetBlock() != nil // raw block, only for iscsi protocol
|
||||
fsType := req.GetVolumeCapability().GetMount().GetFsType()
|
||||
|
||||
notMount, err := createTargetMountPath(ns.Mounter.Interface, targetPath, isBlock)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@@ -396,51 +231,45 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
if err := ns.loginTarget(volumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
volumeMountPath := ns.getVolumeMountPath(volumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
options := []string{"bind"}
|
||||
if req.GetReadonly() {
|
||||
options = append(options, "ro")
|
||||
}
|
||||
|
||||
switch req.VolumeContext["protocol"] {
|
||||
case utils.ProtocolSmb:
|
||||
if err := ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, "", options); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
default:
|
||||
if err := ns.loginTarget(volumeId); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
if isBlock {
|
||||
err = ns.Mounter.Interface.Mount(volumeMountPath, targetPath, "", options)
|
||||
} else {
|
||||
fsType := req.GetVolumeCapability().GetMount().GetFsType()
|
||||
err = ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, fsType, options)
|
||||
}
|
||||
|
||||
volumeMountPath := ns.getVolumeMountPath(volumeId)
|
||||
if volumeMountPath == "" {
|
||||
return nil, status.Error(codes.Internal, "Can't get volume mount path")
|
||||
}
|
||||
|
||||
if isBlock {
|
||||
err = ns.Mounter.Interface.Mount(volumeMountPath, targetPath, "", options)
|
||||
} else {
|
||||
err = ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, fsType, options)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
|
||||
if req.GetVolumeId() == "" { // Not needed, but still a mandatory field
|
||||
volumeId, targetPath := req.GetVolumeId(), req.GetTargetPath()
|
||||
if volumeId == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
|
||||
}
|
||||
|
||||
targetPath := req.GetTargetPath()
|
||||
if targetPath == "" {
|
||||
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
|
||||
}
|
||||
|
||||
if _, err := os.Stat(targetPath); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if os.IsNotExist(err){
|
||||
return &csi.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
@@ -456,6 +285,24 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
|
||||
return &csi.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
needToLogout := true
|
||||
|
||||
list, err := ns.Mounter.Interface.GetMountRefs(targetPath)
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
for _, path := range list {
|
||||
filePrefix := "/var/lib/kubelet/pods/"
|
||||
blkPrefix := "/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish/"
|
||||
|
||||
if strings.HasPrefix(path, filePrefix) || strings.HasPrefix(path, blkPrefix) {
|
||||
needToLogout = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err := ns.Mounter.Interface.Unmount(targetPath); err != nil {
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@@ -464,6 +311,10 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
|
||||
return nil, status.Errorf(codes.Internal, "Failed to remove target path.")
|
||||
}
|
||||
|
||||
if needToLogout {
|
||||
ns.logoutTarget(volumeId)
|
||||
}
|
||||
|
||||
return &csi.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
@@ -499,19 +350,8 @@ func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVo
|
||||
fmt.Sprintf("Volume[%s] does not exist on the %s", volumeId, volumePath))
|
||||
}
|
||||
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
return &csi.NodeGetVolumeStatsResponse{
|
||||
Usage: []*csi.VolumeUsage{
|
||||
&csi.VolumeUsage{
|
||||
Total: k8sVolume.SizeInBytes,
|
||||
Unit: csi.VolumeUsage_BYTES,
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
||||
lun := k8sVolume.Lun
|
||||
|
||||
return &csi.NodeGetVolumeStatsResponse{
|
||||
Usage: []*csi.VolumeUsage{
|
||||
&csi.VolumeUsage{
|
||||
@@ -536,11 +376,6 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
|
||||
return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume[%s] is not found", volumeId))
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
return &csi.NodeExpandVolumeResponse{
|
||||
CapacityBytes: sizeInByte}, nil
|
||||
}
|
||||
|
||||
if err := ns.Initiator.rescan(k8sVolume.Target.Iqn); err != nil {
|
||||
return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to rescan. err: %v", err))
|
||||
}
|
||||
@@ -565,4 +400,4 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
|
||||
}
|
||||
return &csi.NodeExpandVolumeResponse{
|
||||
CapacityBytes: sizeInByte}, nil
|
||||
}
|
||||
}
|
||||
@@ -149,11 +149,11 @@ func getLunTypeByInputParams(lunType string, isThin bool, locationFsType string)
|
||||
return "", fmt.Errorf("Unknown volume fs type: %s", locationFsType)
|
||||
}
|
||||
|
||||
func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, lunUuid string) (webapi.TargetInfo, error) {
|
||||
func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, lunUuid string) error {
|
||||
dsmInfo, err := dsm.DsmInfoGet()
|
||||
|
||||
if err != nil {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s] info", dsm.Ip));
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s] info", dsm.Ip));
|
||||
}
|
||||
|
||||
genTargetIqn := func() string {
|
||||
@@ -175,35 +175,35 @@ func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.Cre
|
||||
targetId, err := dsm.TargetCreate(targetSpec)
|
||||
|
||||
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to create target with spec: %v, err: %v", targetSpec, err))
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to create target with spec: %v, err: %v", targetSpec, err))
|
||||
}
|
||||
|
||||
targetInfo, err := dsm.TargetGet(targetSpec.Name)
|
||||
if err != nil {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get target with spec: %v, err: %v", targetSpec, err))
|
||||
if targetInfo, err := dsm.TargetGet(targetSpec.Name); err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get target with spec: %v, err: %v", targetSpec, err))
|
||||
} else {
|
||||
targetId = strconv.Itoa(targetInfo.TargetId);
|
||||
}
|
||||
|
||||
if spec.MultipleSession == true {
|
||||
if err := dsm.TargetSet(targetId, 0); err != nil {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to set target [%s] max session, err: %v", spec.TargetName, err))
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to set target [%s] max session, err: %v", spec.TargetName, err))
|
||||
}
|
||||
}
|
||||
|
||||
if err := dsm.LunMapTarget([]string{targetId}, lunUuid); err != nil {
|
||||
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to map target [%s] to lun [%s], err: %v", spec.TargetName, lunUuid, err))
|
||||
err = dsm.LunMapTarget([]string{targetId}, lunUuid)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to map target [%s] to lun [%s], err: %v", spec.TargetName, lunUuid, err))
|
||||
}
|
||||
|
||||
return targetInfo, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, error) {
|
||||
// 1. Find a available location
|
||||
if spec.Location == "" {
|
||||
vol, err := service.getFirstAvailableVolume(dsm, spec.Size)
|
||||
if err != nil {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get available location, err: %v", err))
|
||||
}
|
||||
spec.Location = vol.Path
|
||||
@@ -212,13 +212,13 @@ func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.Creat
|
||||
// 2. Check if location exists
|
||||
dsmVolInfo, err := dsm.VolumeGet(spec.Location)
|
||||
if err != nil {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unable to find location %s", spec.Location))
|
||||
}
|
||||
|
||||
lunType, err := getLunTypeByInputParams(spec.Type, spec.ThinProvisioning, dsmVolInfo.FsType)
|
||||
if err != nil {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unknown volume fs type: %s, location: %s", dsmVolInfo.FsType, spec.Location))
|
||||
}
|
||||
|
||||
@@ -234,29 +234,29 @@ func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.Creat
|
||||
_, err = dsm.LunCreate(lunSpec)
|
||||
|
||||
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create LUN, err: %v", err))
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create Volume with name: %s, err: %v", spec.K8sVolumeName, err))
|
||||
}
|
||||
|
||||
// No matter lun existed or not, Get Lun by name
|
||||
lunInfo, err := dsm.LunGet(spec.LunName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
// discussion with log
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
|
||||
}
|
||||
|
||||
// 4. Create Target and Map to Lun
|
||||
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
if err != nil {
|
||||
// FIXME need to delete lun and target
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] CreateVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
|
||||
log.Debugf("[%s] CreateVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
|
||||
|
||||
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
|
||||
return lunInfo, nil
|
||||
}
|
||||
|
||||
func waitCloneFinished(dsm *webapi.DSM, lunName string) error {
|
||||
@@ -291,47 +291,47 @@ func waitCloneFinished(dsm *webapi.DSM, lunName string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcSnapshot *models.K8sSnapshotRespSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
if spec.Size != 0 && spec.Size != srcSnapshot.SizeInBytes {
|
||||
return nil, status.Errorf(codes.OutOfRange, "Requested lun size [%d] is not equal to snapshot size [%d]", spec.Size, srcSnapshot.SizeInBytes)
|
||||
func (service *DsmService) createVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, snapshotInfo webapi.SnapshotInfo) (webapi.LunInfo, error) {
|
||||
if spec.Size != 0 && spec.Size != snapshotInfo.TotalSize {
|
||||
return webapi.LunInfo{}, status.Errorf(codes.OutOfRange, "Lun size [%d] is not equal to snapshot size [%d]", spec.Size, snapshotInfo.TotalSize)
|
||||
}
|
||||
|
||||
snapshotCloneSpec := webapi.SnapshotCloneSpec{
|
||||
Name: spec.LunName,
|
||||
SrcLunUuid: srcSnapshot.ParentUuid,
|
||||
SrcSnapshotUuid: srcSnapshot.Uuid,
|
||||
SrcLunUuid: snapshotInfo.ParentUuid,
|
||||
SrcSnapshotUuid: snapshotInfo.Uuid,
|
||||
}
|
||||
|
||||
if _, err := dsm.SnapshotClone(snapshotCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source snapshot ID: %s, err: %v", srcSnapshot.Uuid, err))
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source snapshot ID: %s, err: %v", snapshotInfo.Uuid, err))
|
||||
}
|
||||
|
||||
if err := waitCloneFinished(dsm, spec.LunName); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
return webapi.LunInfo{}, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
lunInfo, err := dsm.LunGet(spec.LunName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
|
||||
}
|
||||
|
||||
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
if err != nil {
|
||||
// FIXME need to delete lun and target
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
|
||||
log.Debugf("[%s] createVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
|
||||
|
||||
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
|
||||
return lunInfo, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcLunInfo webapi.LunInfo) (*models.K8sVolumeRespSpec, error) {
|
||||
func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcLunInfo webapi.LunInfo) (webapi.LunInfo, error) {
|
||||
if spec.Size != 0 && spec.Size != int64(srcLunInfo.Size) {
|
||||
return nil, status.Errorf(codes.OutOfRange, "Requested lun size [%d] is not equal to src lun size [%d]", spec.Size, srcLunInfo.Size)
|
||||
return webapi.LunInfo{}, status.Errorf(codes.OutOfRange, "Lun size [%d] is not equal to src lun size [%d]", spec.Size, srcLunInfo.Size)
|
||||
}
|
||||
|
||||
if spec.Location == "" {
|
||||
@@ -345,305 +345,255 @@ func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.Cr
|
||||
}
|
||||
|
||||
if _, err := dsm.LunClone(lunCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcLunInfo.Uuid, err))
|
||||
}
|
||||
|
||||
if err := waitCloneFinished(dsm, spec.LunName); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, err.Error())
|
||||
return webapi.LunInfo{}, status.Errorf(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
lunInfo, err := dsm.LunGet(spec.LunName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
|
||||
}
|
||||
|
||||
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
|
||||
if err != nil {
|
||||
// FIXME need to delete lun and target
|
||||
return nil,
|
||||
return webapi.LunInfo{},
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
|
||||
log.Debugf("[%s] createVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
|
||||
|
||||
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
|
||||
return lunInfo, nil
|
||||
}
|
||||
|
||||
func DsmShareToK8sVolume(dsmIp string, info webapi.ShareInfo) *models.K8sVolumeRespSpec {
|
||||
return &models.K8sVolumeRespSpec{
|
||||
DsmIp: dsmIp,
|
||||
VolumeId: info.Uuid,
|
||||
SizeInBytes: utils.MBToBytes(info.QuotaValueInMB),
|
||||
Location: info.VolPath,
|
||||
Name: info.Name,
|
||||
Source: "//" + dsmIp + "/" + info.Name,
|
||||
Protocol: utils.ProtocolSmb,
|
||||
Share: info,
|
||||
}
|
||||
}
|
||||
|
||||
func DsmLunToK8sVolume(dsmIp string, info webapi.LunInfo, targetInfo webapi.TargetInfo) *models.K8sVolumeRespSpec {
|
||||
return &models.K8sVolumeRespSpec{
|
||||
DsmIp: dsmIp,
|
||||
VolumeId: info.Uuid,
|
||||
SizeInBytes: int64(info.Size),
|
||||
Location: info.Location,
|
||||
Name: info.Name,
|
||||
Source: "",
|
||||
Protocol: utils.ProtocolIscsi,
|
||||
Lun: info,
|
||||
Target: targetInfo,
|
||||
}
|
||||
}
|
||||
|
||||
func (service *DsmService) CreateVolume(spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
func (service *DsmService) CreateVolume(spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, string, error) {
|
||||
if spec.SourceVolumeId != "" {
|
||||
/* Create volume by exists volume (Clone) */
|
||||
k8sVolume := service.GetVolume(spec.SourceVolumeId)
|
||||
if k8sVolume == nil {
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("No such volume id: %s", spec.SourceVolumeId))
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.NotFound, fmt.Sprintf("No such volume id: %s", spec.SourceVolumeId))
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
if spec.Protocol == utils.ProtocolIscsi {
|
||||
return service.createVolumeByVolume(dsm, spec, k8sVolume.Lun)
|
||||
} else if spec.Protocol == utils.ProtocolSmb {
|
||||
return service.createSMBVolumeByVolume(dsm, spec, k8sVolume.Share)
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
|
||||
}
|
||||
|
||||
if spec.SourceSnapshotId != "" {
|
||||
lunInfo, err := service.createVolumeByVolume(dsm, spec, k8sVolume.Lun)
|
||||
return lunInfo, dsm.Ip, err
|
||||
} else if spec.SourceSnapshotId != "" {
|
||||
/* Create volume by snapshot */
|
||||
snapshot := service.GetSnapshotByUuid(spec.SourceSnapshotId)
|
||||
if snapshot == nil {
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("No such snapshot id: %s", spec.SourceSnapshotId))
|
||||
}
|
||||
for _, dsm := range service.dsms {
|
||||
snapshotInfo, err := dsm.SnapshotGet(spec.SourceSnapshotId)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// found source by snapshot id, check allowable
|
||||
if spec.DsmIp != "" && spec.DsmIp != snapshot.DsmIp {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same DSM for cloning from snapshots. Source is on %s, but new PVC is on %s",
|
||||
snapshot.DsmIp, spec.DsmIp)
|
||||
return nil, status.Errorf(codes.InvalidArgument, msg)
|
||||
lunInfo, err := service.createVolumeBySnapshot(dsm, spec, snapshotInfo)
|
||||
return lunInfo, dsm.Ip, err
|
||||
}
|
||||
if spec.Location != "" && spec.Location != snapshot.RootPath {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same location for cloning from snapshots. Source is on %s, but new PVC is on %s",
|
||||
snapshot.RootPath, spec.Location)
|
||||
return nil, status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
if spec.Protocol != snapshot.Protocol {
|
||||
msg := fmt.Sprintf("The source PVC and destination PVCs shouldn't have different protocols. Source is %s, but new PVC is %s",
|
||||
snapshot.Protocol, spec.Protocol)
|
||||
return nil, status.Errorf(codes.InvalidArgument, msg)
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(snapshot.DsmIp)
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.NotFound, fmt.Sprintf("No such snapshot id: %s", spec.SourceSnapshotId))
|
||||
} else if spec.DsmIp != "" {
|
||||
/* Create volume by specific dsm ip */
|
||||
dsm, err := service.GetDsm(spec.DsmIp)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", snapshot.DsmIp))
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("%v", err))
|
||||
}
|
||||
|
||||
if spec.Protocol == utils.ProtocolIscsi {
|
||||
return service.createVolumeBySnapshot(dsm, spec, snapshot)
|
||||
} else if spec.Protocol == utils.ProtocolSmb {
|
||||
return service.createSMBVolumeBySnapshot(dsm, spec, snapshot)
|
||||
lunInfo, err := service.createVolumeByDsm(dsm, spec)
|
||||
return lunInfo, dsm.Ip, err
|
||||
} else {
|
||||
/* Find appropriate dsm to create volume */
|
||||
for _, dsm := range service.dsms {
|
||||
lunInfo, err := service.createVolumeByDsm(dsm, spec)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to create Volume: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
return lunInfo, dsm.Ip, nil
|
||||
}
|
||||
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
|
||||
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("Couldn't find any host available to create Volume"))
|
||||
}
|
||||
|
||||
/* Find appropriate dsm to create volume */
|
||||
for _, dsm := range service.dsms {
|
||||
if spec.DsmIp != "" && spec.DsmIp != dsm.Ip {
|
||||
continue
|
||||
}
|
||||
|
||||
var k8sVolume *models.K8sVolumeRespSpec
|
||||
var err error
|
||||
if spec.Protocol == utils.ProtocolIscsi {
|
||||
k8sVolume, err = service.createVolumeByDsm(dsm, spec)
|
||||
} else if spec.Protocol == utils.ProtocolSmb {
|
||||
k8sVolume, err = service.createSMBVolumeByDsm(dsm, spec)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to create Volume: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
|
||||
return k8sVolume, nil
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Couldn't find any host available to create Volume"))
|
||||
}
|
||||
|
||||
func (service *DsmService) DeleteVolume(volId string) error {
|
||||
k8sVolume := service.GetVolume(volId)
|
||||
func (service *DsmService) DeleteVolume(volumeId string) error {
|
||||
k8sVolume := service.GetVolume(volumeId)
|
||||
if k8sVolume == nil {
|
||||
log.Infof("Skip delete volume[%s] that is no exist", volId)
|
||||
log.Infof("Skip delete volume[%s] that is no exist", volumeId)
|
||||
return nil
|
||||
}
|
||||
|
||||
lun, target := k8sVolume.Lun, k8sVolume.Target
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
if err := dsm.LunDelete(lun.Uuid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(target.MappedLuns) != 1 {
|
||||
log.Infof("Skip deletes target[%s] that was mapped with lun. DSM[%s]", target.Name, dsm.Ip)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := dsm.TargetDelete(strconv.Itoa(target.TargetId)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listVolumesByDsm(dsm *webapi.DSM, infos *[]*models.ListK8sVolumeRespSpec) {
|
||||
targetInfos, err := dsm.TargetList()
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list targets: %v", dsm.Ip, err)
|
||||
}
|
||||
|
||||
for _, target := range targetInfos {
|
||||
// TODO: use target.ConnectedSessions to filter targets
|
||||
for _, mapping := range target.MappedLuns {
|
||||
lun, err := dsm.LunGet(mapping.LunUuid)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to get LUN(%s): %v", dsm.Ip, mapping.LunUuid, err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(lun.Name, models.LunPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
*infos = append(*infos, &models.ListK8sVolumeRespSpec{
|
||||
DsmIp: dsm.Ip,
|
||||
Target: target,
|
||||
Lun: lun,
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (service *DsmService) ListVolumes() []*models.ListK8sVolumeRespSpec {
|
||||
var infos []*models.ListK8sVolumeRespSpec
|
||||
|
||||
for _, dsm := range service.dsms {
|
||||
service.listVolumesByDsm(dsm, &infos)
|
||||
}
|
||||
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) GetVolume(lunUuid string) *models.ListK8sVolumeRespSpec {
|
||||
volumes := service.ListVolumes()
|
||||
|
||||
for _, volume := range volumes {
|
||||
if volume.Lun.Uuid == lunUuid {
|
||||
return volume
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ExpandLun(lunUuid string, newSize int64) error {
|
||||
k8sVolume := service.GetVolume(lunUuid);
|
||||
if k8sVolume == nil {
|
||||
return status.Errorf(codes.InvalidArgument, fmt.Sprintf("Can't find volume[%s].", lunUuid))
|
||||
}
|
||||
|
||||
if int64(k8sVolume.Lun.Size) > newSize {
|
||||
return status.Errorf(codes.InvalidArgument,
|
||||
fmt.Sprintf("Failed to expand volume[%s], because expand size[%d] bigger than before[%d].",
|
||||
lunUuid, newSize, k8sVolume.Lun.Size))
|
||||
}
|
||||
|
||||
spec := webapi.LunUpdateSpec{
|
||||
Uuid: lunUuid,
|
||||
NewSize: uint64(newSize),
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
if err := dsm.ShareDelete(k8sVolume.Share.Name); err != nil {
|
||||
log.Errorf("[%s] Failed to delete Share(%s): %v", dsm.Ip, k8sVolume.Share.Name, err)
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
lun, target := k8sVolume.Lun, k8sVolume.Target
|
||||
|
||||
if err := dsm.LunDelete(lun.Uuid); err != nil {
|
||||
if _, err := dsm.LunGet(lun.Uuid); err != nil && errors.Is(err, utils.NoSuchLunError("")) {
|
||||
return nil
|
||||
}
|
||||
log.Errorf("[%s] Failed to delete LUN(%s): %v", dsm.Ip, lun.Uuid, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if len(target.MappedLuns) != 1 {
|
||||
log.Infof("Skip deletes target[%s] that was mapped with lun. DSM[%s]", target.Name, dsm.Ip)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := dsm.TargetDelete(strconv.Itoa(target.TargetId)); err != nil {
|
||||
if _, err := dsm.TargetGet(strconv.Itoa(target.TargetId)); err != nil {
|
||||
return nil
|
||||
}
|
||||
log.Errorf("[%s] Failed to delete target(%d): %v", dsm.Ip, target.TargetId, err)
|
||||
return err
|
||||
}
|
||||
if err := dsm.LunUpdate(spec); err != nil {
|
||||
return status.Errorf(codes.InvalidArgument,
|
||||
fmt.Sprintf("Failed to expand volume[%s]. err: %v", lunUuid, err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listISCSIVolumes(dsmIp string) (infos []*models.K8sVolumeRespSpec) {
|
||||
for _, dsm := range service.dsms {
|
||||
if dsmIp != "" && dsmIp != dsm.Ip {
|
||||
continue
|
||||
}
|
||||
|
||||
targetInfos, err := dsm.TargetList()
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list targets: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, target := range targetInfos {
|
||||
// TODO: use target.ConnectedSessions to filter targets
|
||||
for _, mapping := range target.MappedLuns {
|
||||
lun, err := dsm.LunGet(mapping.LunUuid)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to get LUN(%s): %v", dsm.Ip, mapping.LunUuid, err)
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(lun.Name, models.LunPrefix) {
|
||||
continue
|
||||
}
|
||||
|
||||
// FIXME: filter same LUN mapping to two target
|
||||
infos = append(infos, DsmLunToK8sVolume(dsm.Ip, lun, target))
|
||||
}
|
||||
}
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) ListVolumes() (infos []*models.K8sVolumeRespSpec) {
|
||||
infos = append(infos, service.listISCSIVolumes("")...)
|
||||
infos = append(infos, service.listSMBVolumes("")...)
|
||||
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) GetVolume(volId string) *models.K8sVolumeRespSpec {
|
||||
volumes := service.ListVolumes()
|
||||
for _, volume := range volumes {
|
||||
if volume.VolumeId == volId {
|
||||
return volume
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) GetVolumeByName(volName string) *models.K8sVolumeRespSpec {
|
||||
volumes := service.ListVolumes()
|
||||
for _, volume := range volumes {
|
||||
if volume.Name == models.GenLunName(volName) ||
|
||||
volume.Name == models.GenShareName(volName) {
|
||||
return volume
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) GetSnapshotByName(snapshotName string) *models.K8sSnapshotRespSpec {
|
||||
snaps := service.ListAllSnapshots()
|
||||
for _, snap := range snaps {
|
||||
if snap.Name == snapshotName {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ExpandVolume(volId string, newSize int64) (*models.K8sVolumeRespSpec, error) {
|
||||
k8sVolume := service.GetVolume(volId);
|
||||
func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (string, error) {
|
||||
k8sVolume := service.GetVolume(spec.K8sVolumeId);
|
||||
if k8sVolume == nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Can't find volume[%s].", volId))
|
||||
return "", status.Errorf(codes.NotFound, fmt.Sprintf("Can't find volume[%s].", spec.K8sVolumeId))
|
||||
}
|
||||
|
||||
if k8sVolume.SizeInBytes > newSize {
|
||||
return nil, status.Errorf(codes.InvalidArgument,
|
||||
fmt.Sprintf("Failed to expand volume[%s], because expand size[%d] smaller than before[%d].",
|
||||
volId, newSize, k8sVolume.SizeInBytes))
|
||||
snapshotSpec := webapi.SnapshotCreateSpec{
|
||||
Name: spec.SnapshotName,
|
||||
LunUuid: spec.K8sVolumeId,
|
||||
Description: spec.Description,
|
||||
TakenBy: spec.TakenBy,
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
|
||||
return "", status.Errorf(codes.InvalidArgument, fmt.Sprintf("Failed to get dsm: %v", err))
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
newSizeInMB := utils.BytesToMBCeil(newSize) // round up to MB
|
||||
if err := dsm.SetShareQuota(k8sVolume.Share, newSizeInMB); err != nil {
|
||||
log.Errorf("[%s] Failed to set quota [%d (MB)] to Share [%s]: %v",
|
||||
dsm.Ip, newSizeInMB, k8sVolume.Share.Name, err)
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to expand volume[%s]. err: %v", volId, err))
|
||||
}
|
||||
// convert MB to bytes, may be diff from the input newSize
|
||||
k8sVolume.SizeInBytes = utils.MBToBytes(newSizeInMB)
|
||||
} else {
|
||||
spec := webapi.LunUpdateSpec{
|
||||
Uuid: volId,
|
||||
NewSize: uint64(newSize),
|
||||
}
|
||||
if err := dsm.LunUpdate(spec); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to expand volume[%s]. err: %v", volId, err))
|
||||
}
|
||||
k8sVolume.SizeInBytes = newSize
|
||||
snapshotUuid, err := dsm.SnapshotCreate(snapshotSpec)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return k8sVolume, nil
|
||||
return snapshotUuid, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (*models.K8sSnapshotRespSpec, error) {
|
||||
srcVolId := spec.K8sVolumeId
|
||||
func (service *DsmService) DeleteSnapshot(snapshotUuid string) error {
|
||||
for _, dsm := range service.dsms {
|
||||
_, err := dsm.SnapshotGet(snapshotUuid)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
k8sVolume := service.GetVolume(srcVolId);
|
||||
err = dsm.SnapshotDelete(snapshotUuid)
|
||||
if err != nil {
|
||||
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to delete snapshot [%s]. err: %v", snapshotUuid, err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ListAllSnapshots() ([]webapi.SnapshotInfo, error) {
|
||||
var allInfos []webapi.SnapshotInfo
|
||||
|
||||
for _, dsm := range service.dsms {
|
||||
infos, err := dsm.SnapshotList("")
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
allInfos = append(allInfos, infos...)
|
||||
}
|
||||
|
||||
return allInfos, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo, error) {
|
||||
k8sVolume := service.GetVolume(lunUuid);
|
||||
if k8sVolume == nil {
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Can't find volume[%s].", srcVolId))
|
||||
return []webapi.SnapshotInfo{}, nil // return empty when the volume does not exist
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
@@ -651,201 +601,24 @@ func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSp
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Failed to get dsm: %v", err))
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolIscsi {
|
||||
snapshotSpec := webapi.SnapshotCreateSpec{
|
||||
Name: spec.SnapshotName,
|
||||
LunUuid: srcVolId,
|
||||
Description: spec.Description,
|
||||
TakenBy: spec.TakenBy,
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
|
||||
snapshotUuid, err := dsm.SnapshotCreate(snapshotSpec)
|
||||
if err != nil {
|
||||
if err == utils.OutOfFreeSpaceError("") || err == utils.SnapshotReachMaxCountError("") {
|
||||
return nil,status.Errorf(codes.ResourceExhausted, fmt.Sprintf("Failed to SnapshotCreate(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to SnapshotCreate(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
|
||||
if snapshot := service.getISCSISnapshot(snapshotUuid); snapshot != nil {
|
||||
return snapshot, nil
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Failed to get iscsi snapshot (%s). Not found", snapshotUuid))
|
||||
} else if k8sVolume.Protocol == utils.ProtocolSmb {
|
||||
snapshotSpec := webapi.ShareSnapshotCreateSpec{
|
||||
ShareName: k8sVolume.Share.Name,
|
||||
Desc: models.ShareSnapshotDescPrefix + spec.SnapshotName, // limitations: don't change the desc by DSM
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
|
||||
snapshotTime, err := dsm.ShareSnapshotCreate(snapshotSpec)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ShareSnapshotCreate(%s), err: %v", srcVolId, err))
|
||||
}
|
||||
|
||||
snapshots := service.listSMBSnapshotsByDsm(dsm)
|
||||
for _, snapshot := range snapshots {
|
||||
if snapshot.Time == snapshotTime && snapshot.ParentUuid == srcVolId {
|
||||
return snapshot, nil
|
||||
}
|
||||
}
|
||||
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Failed to get smb snapshot (%s, %s). Not found", snapshotTime, srcVolId))
|
||||
}
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, "Unsupported volume protocol")
|
||||
}
|
||||
|
||||
func (service *DsmService) GetSnapshotByUuid(snapshotUuid string) *models.K8sSnapshotRespSpec {
|
||||
snaps := service.ListAllSnapshots()
|
||||
for _, snap := range snaps {
|
||||
if snap.Uuid == snapshotUuid {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (service *DsmService) DeleteSnapshot(snapshotUuid string) error {
|
||||
snapshot := service.GetSnapshotByUuid(snapshotUuid)
|
||||
if snapshot == nil {
|
||||
return nil
|
||||
}
|
||||
dsm, err := service.GetDsm(snapshot.DsmIp)
|
||||
infos, err := dsm.SnapshotList(lunUuid)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, status.Errorf(codes.Internal,
|
||||
fmt.Sprintf("Failed to list snapshot on lun [%s]. err: %v", lunUuid, err))
|
||||
}
|
||||
|
||||
if snapshot.Protocol == utils.ProtocolSmb {
|
||||
if err := dsm.ShareSnapshotDelete(snapshot.Time, snapshot.ParentName); err != nil {
|
||||
if snapshot := service.getSMBSnapshot(snapshotUuid); snapshot == nil { // idempotency
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Errorf("Failed to delete Share snapshot [%s]. err: %v", snapshotUuid, err)
|
||||
return err
|
||||
}
|
||||
} else if snapshot.Protocol == utils.ProtocolIscsi {
|
||||
if err := dsm.SnapshotDelete(snapshotUuid); err != nil {
|
||||
if _, err := dsm.SnapshotGet(snapshotUuid); err != nil { // idempotency
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Errorf("Failed to delete LUN snapshot [%s]. err: %v", snapshotUuid, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listISCSISnapshotsByDsm(dsm *webapi.DSM) (infos []*models.K8sSnapshotRespSpec) {
|
||||
volumes := service.listISCSIVolumes(dsm.Ip)
|
||||
for _, volume := range volumes {
|
||||
lunInfo := volume.Lun
|
||||
lunSnaps, err := dsm.SnapshotList(lunInfo.Uuid)
|
||||
func (service *DsmService) GetSnapshot(snapshotUuid string) (webapi.SnapshotInfo, error) {
|
||||
for _, dsm := range service.dsms {
|
||||
info, err := dsm.SnapshotGet(snapshotUuid)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list LUN[%s] snapshots: %v", dsm.Ip, lunInfo.Uuid, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, info := range lunSnaps {
|
||||
infos = append(infos, DsmLunSnapshotToK8sSnapshot(dsm.Ip, info, lunInfo))
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (service *DsmService) ListAllSnapshots() []*models.K8sSnapshotRespSpec {
|
||||
var allInfos []*models.K8sSnapshotRespSpec
|
||||
|
||||
for _, dsm := range service.dsms {
|
||||
allInfos = append(allInfos, service.listISCSISnapshotsByDsm(dsm)...)
|
||||
allInfos = append(allInfos, service.listSMBSnapshotsByDsm(dsm)...)
|
||||
}
|
||||
|
||||
return allInfos
|
||||
}
|
||||
|
||||
func (service *DsmService) ListSnapshots(volId string) []*models.K8sSnapshotRespSpec {
|
||||
var allInfos []*models.K8sSnapshotRespSpec
|
||||
|
||||
k8sVolume := service.GetVolume(volId);
|
||||
if k8sVolume == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
dsm, err := service.GetDsm(k8sVolume.DsmIp)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to get DSM[%s]", k8sVolume.DsmIp)
|
||||
return nil
|
||||
}
|
||||
|
||||
if k8sVolume.Protocol == utils.ProtocolIscsi {
|
||||
infos, err := dsm.SnapshotList(volId)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to SnapshotList[%s]", volId)
|
||||
return nil
|
||||
}
|
||||
for _, info := range infos {
|
||||
allInfos = append(allInfos, DsmLunSnapshotToK8sSnapshot(dsm.Ip, info, k8sVolume.Lun))
|
||||
}
|
||||
} else {
|
||||
infos, err := dsm.ShareSnapshotList(k8sVolume.Share.Name)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to ShareSnapshotList[%s]", k8sVolume.Share.Name)
|
||||
return nil
|
||||
}
|
||||
for _, info := range infos {
|
||||
allInfos = append(allInfos, DsmShareSnapshotToK8sSnapshot(dsm.Ip, info, k8sVolume.Share))
|
||||
}
|
||||
}
|
||||
|
||||
return allInfos
|
||||
}
|
||||
|
||||
func DsmShareSnapshotToK8sSnapshot(dsmIp string, info webapi.ShareSnapshotInfo, shareInfo webapi.ShareInfo) *models.K8sSnapshotRespSpec {
|
||||
return &models.K8sSnapshotRespSpec{
|
||||
DsmIp: dsmIp,
|
||||
Name: strings.ReplaceAll(info.Desc, models.ShareSnapshotDescPrefix, ""), // snapshot-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
|
||||
Uuid: info.Uuid,
|
||||
ParentName: shareInfo.Name,
|
||||
ParentUuid: shareInfo.Uuid,
|
||||
Status: "Healthy", // share snapshot always Healthy
|
||||
SizeInBytes: utils.MBToBytes(shareInfo.QuotaValueInMB), // unable to get snapshot quota, return parent quota instead
|
||||
CreateTime: GMTToUnixSecond(info.Time),
|
||||
Time: info.Time,
|
||||
RootPath: shareInfo.VolPath,
|
||||
Protocol: utils.ProtocolSmb,
|
||||
}
|
||||
}
|
||||
|
||||
func DsmLunSnapshotToK8sSnapshot(dsmIp string, info webapi.SnapshotInfo, lunInfo webapi.LunInfo) *models.K8sSnapshotRespSpec {
|
||||
return &models.K8sSnapshotRespSpec{
|
||||
DsmIp: dsmIp,
|
||||
Name: info.Name,
|
||||
Uuid: info.Uuid,
|
||||
ParentName: lunInfo.Name, // it can be empty for iscsi
|
||||
ParentUuid: info.ParentUuid,
|
||||
Status: info.Status,
|
||||
SizeInBytes: info.TotalSize,
|
||||
CreateTime: info.CreateTime,
|
||||
Time: "",
|
||||
RootPath: info.RootPath,
|
||||
Protocol: utils.ProtocolIscsi,
|
||||
}
|
||||
}
|
||||
|
||||
func (service *DsmService) getISCSISnapshot(snapshotUuid string) *models.K8sSnapshotRespSpec {
|
||||
for _, dsm := range service.dsms {
|
||||
snapshots := service.listISCSISnapshotsByDsm(dsm)
|
||||
for _, snap := range snapshots {
|
||||
if snap.Uuid == snapshotUuid {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
return webapi.SnapshotInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("No such snapshot uuid [%s]", snapshotUuid))
|
||||
}
|
||||
|
||||
@@ -1,231 +0,0 @@
|
||||
/*
|
||||
* Copyright 2022 Synology Inc.
|
||||
*/
|
||||
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/models"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
)
|
||||
|
||||
func GMTToUnixSecond(timeStr string) (int64) {
|
||||
t, err := time.Parse("GMT-07-2006.01.02-15.04.05", timeStr)
|
||||
if err != nil {
|
||||
log.Error(err)
|
||||
return -1
|
||||
}
|
||||
return t.Unix()
|
||||
}
|
||||
|
||||
func (service *DsmService) createSMBVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcSnapshot *models.K8sSnapshotRespSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
srcShareInfo, err := dsm.ShareGet(srcSnapshot.ParentName)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get share: %s, err: %v", srcSnapshot.ParentName, err))
|
||||
}
|
||||
|
||||
shareCloneSpec := webapi.ShareCloneSpec{
|
||||
Name: spec.ShareName,
|
||||
Snapshot: srcSnapshot.Time,
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: spec.ShareName,
|
||||
VolPath: srcSnapshot.RootPath,
|
||||
Desc: "Cloned from [" + srcSnapshot.Time + "] by csi driver", // max: 64
|
||||
EnableRecycleBin: srcShareInfo.EnableRecycleBin,
|
||||
RecycleBinAdminOnly: srcShareInfo.RecycleBinAdminOnly,
|
||||
NameOrg: srcSnapshot.ParentName,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := dsm.ShareClone(shareCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcShareInfo.Uuid, err))
|
||||
}
|
||||
|
||||
shareInfo, err := dsm.ShareGet(spec.ShareName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed Share with name: [%s], err: %v", spec.ShareName, err))
|
||||
}
|
||||
|
||||
newSizeInMB := utils.BytesToMBCeil(spec.Size)
|
||||
if shareInfo.QuotaValueInMB == 0 {
|
||||
// known issue for some DS, manually set quota to the new share
|
||||
if err := dsm.SetShareQuota(shareInfo, newSizeInMB); err != nil {
|
||||
msg := fmt.Sprintf("Failed to set quota [%d] to Share [%s], err: %v", newSizeInMB, shareInfo.Name, err)
|
||||
log.Error(msg)
|
||||
return nil, status.Errorf(codes.Internal, msg)
|
||||
}
|
||||
|
||||
shareInfo.QuotaValueInMB = newSizeInMB
|
||||
}
|
||||
|
||||
if newSizeInMB != shareInfo.QuotaValueInMB {
|
||||
// FIXME: need to delete share
|
||||
return nil,
|
||||
status.Errorf(codes.OutOfRange, "Requested share quotaMB [%d] is not equal to snapshot restore quotaMB [%d]", newSizeInMB, shareInfo.QuotaValueInMB)
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createSMBVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid);
|
||||
|
||||
return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createSMBVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcShareInfo webapi.ShareInfo) (*models.K8sVolumeRespSpec, error) {
|
||||
newSizeInMB := utils.BytesToMBCeil(spec.Size)
|
||||
if spec.Size != 0 && newSizeInMB != srcShareInfo.QuotaValueInMB {
|
||||
return nil,
|
||||
status.Errorf(codes.OutOfRange, "Requested share quotaMB [%d] is not equal to src share quotaMB [%d]", newSizeInMB, srcShareInfo.QuotaValueInMB)
|
||||
}
|
||||
|
||||
shareCloneSpec := webapi.ShareCloneSpec{
|
||||
Name: spec.ShareName,
|
||||
Snapshot: "",
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: spec.ShareName,
|
||||
VolPath: srcShareInfo.VolPath, // must be same with srcShare location
|
||||
Desc: "Cloned from [" + srcShareInfo.Name + "] by csi driver", // max: 64
|
||||
EnableRecycleBin: srcShareInfo.EnableRecycleBin,
|
||||
RecycleBinAdminOnly: srcShareInfo.RecycleBinAdminOnly,
|
||||
NameOrg: srcShareInfo.Name,
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := dsm.ShareClone(shareCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcShareInfo.Uuid, err))
|
||||
}
|
||||
|
||||
shareInfo, err := dsm.ShareGet(spec.ShareName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed Share with name: [%s], err: %v", spec.ShareName, err))
|
||||
}
|
||||
|
||||
if shareInfo.QuotaValueInMB == 0 {
|
||||
// known issue for some DS, manually set quota to the new share
|
||||
if err := dsm.SetShareQuota(shareInfo, newSizeInMB); err != nil {
|
||||
msg := fmt.Sprintf("Failed to set quota [%d] to Share [%s], err: %v", newSizeInMB, shareInfo.Name, err)
|
||||
log.Error(msg)
|
||||
return nil, status.Errorf(codes.Internal, msg)
|
||||
}
|
||||
|
||||
shareInfo.QuotaValueInMB = newSizeInMB
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createSMBVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid);
|
||||
|
||||
return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) createSMBVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
|
||||
// TODO: Check if share name is allowable
|
||||
|
||||
// 1. Find a available location
|
||||
if spec.Location == "" {
|
||||
vol, err := service.getFirstAvailableVolume(dsm, spec.Size)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get available location, err: %v", err))
|
||||
}
|
||||
spec.Location = vol.Path
|
||||
}
|
||||
|
||||
// 2. Check if location exists
|
||||
_, err := dsm.VolumeGet(spec.Location)
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unable to find location %s", spec.Location))
|
||||
}
|
||||
|
||||
// 3. Create Share
|
||||
sizeInMB := utils.BytesToMBCeil(spec.Size)
|
||||
shareSpec := webapi.ShareCreateSpec{
|
||||
Name: spec.ShareName,
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: spec.ShareName,
|
||||
VolPath: spec.Location,
|
||||
Desc: "Created by Synology K8s CSI",
|
||||
EnableShareCow: false,
|
||||
EnableRecycleBin: true,
|
||||
RecycleBinAdminOnly: true,
|
||||
Encryption: 0,
|
||||
QuotaForCreate: &sizeInMB,
|
||||
},
|
||||
}
|
||||
|
||||
log.Debugf("ShareCreate spec: %v", shareSpec)
|
||||
err = dsm.ShareCreate(shareSpec)
|
||||
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
|
||||
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to create share, err: %v", err))
|
||||
}
|
||||
|
||||
shareInfo, err := dsm.ShareGet(spec.ShareName)
|
||||
if err != nil {
|
||||
return nil,
|
||||
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed Share with name: %s, err: %v", spec.ShareName, err))
|
||||
}
|
||||
|
||||
log.Debugf("[%s] createSMBVolumeByDsm Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid)
|
||||
|
||||
return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
|
||||
}
|
||||
|
||||
func (service *DsmService) listSMBVolumes(dsmIp string) (infos []*models.K8sVolumeRespSpec) {
|
||||
for _, dsm := range service.dsms {
|
||||
if dsmIp != "" && dsmIp != dsm.Ip {
|
||||
continue
|
||||
}
|
||||
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list shares: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, share := range shares {
|
||||
if !strings.HasPrefix(share.Name, models.SharePrefix) {
|
||||
continue
|
||||
}
|
||||
infos = append(infos, DsmShareToK8sVolume(dsm.Ip, share))
|
||||
}
|
||||
}
|
||||
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) listSMBSnapshotsByDsm(dsm *webapi.DSM) (infos []*models.K8sSnapshotRespSpec) {
|
||||
volumes := service.listSMBVolumes(dsm.Ip)
|
||||
for _, volume := range volumes {
|
||||
shareInfo := volume.Share
|
||||
shareSnaps, err := dsm.ShareSnapshotList(shareInfo.Name)
|
||||
if err != nil {
|
||||
log.Errorf("[%s] Failed to list share snapshots: %v", dsm.Ip, err)
|
||||
continue
|
||||
}
|
||||
for _, info := range shareSnaps {
|
||||
infos = append(infos, DsmShareSnapshotToK8sSnapshot(dsm.Ip, info, shareInfo))
|
||||
}
|
||||
}
|
||||
return infos
|
||||
}
|
||||
|
||||
func (service *DsmService) getSMBSnapshot(snapshotUuid string) *models.K8sSnapshotRespSpec {
|
||||
for _, dsm := range service.dsms {
|
||||
snapshots := service.listSMBSnapshotsByDsm(dsm)
|
||||
for _, snap := range snapshots {
|
||||
if snap.Uuid == snapshotUuid {
|
||||
return snap
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -58,7 +58,6 @@ type SnapshotInfo struct {
|
||||
Status string `json:"status"`
|
||||
TotalSize int64 `json:"total_size"`
|
||||
CreateTime int64 `json:"create_time"`
|
||||
RootPath string `json:"root_path"`
|
||||
}
|
||||
|
||||
type LunDevAttrib struct {
|
||||
@@ -108,8 +107,6 @@ func errCodeMapping(errCode int, oriErr error) error {
|
||||
switch errCode {
|
||||
case 18990002: // Out of free space
|
||||
return utils.OutOfFreeSpaceError("")
|
||||
case 18990531: // No such LUN
|
||||
return utils.NoSuchLunError("")
|
||||
case 18990538: // Duplicated LUN name
|
||||
return utils.AlreadyExistError("")
|
||||
case 18990541:
|
||||
@@ -127,7 +124,7 @@ func errCodeMapping(errCode int, oriErr error) error {
|
||||
}
|
||||
|
||||
if errCode > 18990000 {
|
||||
return utils.IscsiDefaultError{errCode}
|
||||
return utils.IscsiDefaultError("")
|
||||
}
|
||||
return oriErr
|
||||
}
|
||||
@@ -146,7 +143,7 @@ func (dsm *DSM) LunList() ([]LunInfo, error) {
|
||||
|
||||
resp, err := dsm.sendRequest("", &LunInfos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, errCodeMapping(resp.ErrorCode, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lunInfos, ok := resp.Data.(*LunInfos)
|
||||
@@ -177,8 +174,9 @@ func (dsm *DSM) LunCreate(spec LunCreateSpec) (string, error) {
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &LunCreateResp{}, params, "webapi/entry.cgi")
|
||||
err = errCodeMapping(resp.ErrorCode, err)
|
||||
if err != nil {
|
||||
return "", errCodeMapping(resp.ErrorCode, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
lunResp, ok := resp.Data.(*LunCreateResp)
|
||||
@@ -197,9 +195,9 @@ func (dsm *DSM) LunUpdate(spec LunUpdateSpec) error {
|
||||
params.Add("uuid", strconv.Quote(spec.Uuid))
|
||||
params.Add("new_size", strconv.FormatInt(int64(spec.NewSize), 10))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -218,9 +216,9 @@ func (dsm *DSM) LunGet(uuid string) (LunInfo, error) {
|
||||
}
|
||||
info := Info{}
|
||||
|
||||
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return LunInfo{}, errCodeMapping(resp.ErrorCode, err)
|
||||
return LunInfo{}, err
|
||||
}
|
||||
|
||||
return info.Lun, nil
|
||||
@@ -240,8 +238,9 @@ func (dsm *DSM) LunClone(spec LunCloneSpec) (string, error) {
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &LunCloneResp{}, params, "webapi/entry.cgi")
|
||||
err = errCodeMapping(resp.ErrorCode, err)
|
||||
if err != nil {
|
||||
return "", errCodeMapping(resp.ErrorCode, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
cloneLunResp, ok := resp.Data.(*LunCloneResp)
|
||||
@@ -265,7 +264,7 @@ func (dsm *DSM) TargetList() ([]TargetInfo, error) {
|
||||
|
||||
resp, err := dsm.sendRequest("", &TargetInfos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, errCodeMapping(resp.ErrorCode, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
trgInfos, ok := resp.Data.(*TargetInfos)
|
||||
@@ -288,9 +287,9 @@ func (dsm *DSM) TargetGet(targetId string) (TargetInfo, error) {
|
||||
}
|
||||
info := Info{}
|
||||
|
||||
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return TargetInfo{}, errCodeMapping(resp.ErrorCode, err)
|
||||
return TargetInfo{}, err
|
||||
}
|
||||
|
||||
return info.Target, nil
|
||||
@@ -305,9 +304,9 @@ func (dsm *DSM) TargetSet(targetId string, maxSession int) error {
|
||||
params.Add("target_id", strconv.Quote(targetId))
|
||||
params.Add("max_sessions", strconv.Itoa(maxSession))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -327,8 +326,9 @@ func (dsm *DSM) TargetCreate(spec TargetCreateSpec) (string, error) {
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &TrgCreateResp{}, params, "webapi/entry.cgi")
|
||||
err = errCodeMapping(resp.ErrorCode, err)
|
||||
if err != nil {
|
||||
return "", errCodeMapping(resp.ErrorCode, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
trgResp, ok := resp.Data.(*TrgCreateResp)
|
||||
@@ -351,9 +351,9 @@ func (dsm *DSM) LunMapTarget(targetIds []string, lunUuid string) error {
|
||||
log.Debugln(params)
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -365,9 +365,9 @@ func (dsm *DSM) LunDelete(lunUuid string) error {
|
||||
params.Add("version", "1")
|
||||
params.Add("uuid", strconv.Quote(lunUuid))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -379,9 +379,9 @@ func (dsm *DSM) TargetDelete(targetName string) error {
|
||||
params.Add("version", "1")
|
||||
params.Add("target_id", strconv.Quote(targetName))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return errCodeMapping(resp.ErrorCode, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -7,109 +7,21 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/logger"
|
||||
)
|
||||
|
||||
type ShareInfo struct {
|
||||
Name string `json:"name"` // required
|
||||
VolPath string `json:"vol_path"` // required
|
||||
Name string `json:"name"`
|
||||
VolPath string `json:"vol_path"`
|
||||
Desc string `json:"desc"`
|
||||
EnableShareCow bool `json:"enable_share_cow"` // field for create
|
||||
EnableShareCow bool `json:"enable_share_cow"`
|
||||
EnableRecycleBin bool `json:"enable_recycle_bin"`
|
||||
RecycleBinAdminOnly bool `json:"recycle_bin_admin_only"`
|
||||
Encryption int `json:"encryption"` // field for create
|
||||
QuotaForCreate *int64 `json:"share_quota,omitempty"`
|
||||
QuotaValueInMB int64 `json:"quota_value"` // field for get
|
||||
SupportSnapshot bool `json:"support_snapshot"` // field for get
|
||||
Uuid string `json:"uuid"` // field for get
|
||||
NameOrg string `json:"name_org"` // required for clone
|
||||
}
|
||||
|
||||
type ShareUpdateInfo struct {
|
||||
Name string `json:"name"` // required
|
||||
VolPath string `json:"vol_path"` // required
|
||||
QuotaForCreate *int64 `json:"share_quota,omitempty"`
|
||||
// Add properties you want to update to shares here
|
||||
}
|
||||
|
||||
type ShareSnapshotInfo struct {
|
||||
Uuid string `json:"ruuid"`
|
||||
Time string `json:"time"`
|
||||
Desc string `json:"desc"`
|
||||
SnapSize string `json:"snap_size"` // the complete size of the snapshot
|
||||
Lock bool `json:"lock"`
|
||||
ScheduleSnapshot bool `json:"schedule_snapshot"`
|
||||
Encryption int `json:"encryption"`
|
||||
}
|
||||
|
||||
type ShareCreateSpec struct {
|
||||
Name string
|
||||
ShareInfo ShareInfo
|
||||
}
|
||||
|
||||
type ShareCloneSpec struct {
|
||||
Name string
|
||||
Snapshot string
|
||||
ShareInfo ShareInfo
|
||||
}
|
||||
|
||||
type ShareSnapshotCreateSpec struct {
|
||||
ShareName string
|
||||
Desc string
|
||||
IsLocked bool
|
||||
}
|
||||
|
||||
type SharePermissionSetSpec struct {
|
||||
Name string
|
||||
UserGroupType string // "local_user"/"local_group"/"system"
|
||||
Permissions []*SharePermission
|
||||
}
|
||||
|
||||
type SharePermission struct {
|
||||
Name string `json:"name"`
|
||||
IsReadonly bool `json:"is_readonly"`
|
||||
IsWritable bool `json:"is_writable"`
|
||||
IsDeny bool `json:"is_deny"`
|
||||
IsCustom bool `json:"is_custom,omitempty"`
|
||||
IsAdmin bool `json:"is_admin,omitempty"` // field for list
|
||||
}
|
||||
|
||||
func shareErrCodeMapping(errCode int, oriErr error) error {
|
||||
switch errCode {
|
||||
case 402: // No such share
|
||||
return utils.NoSuchShareError("")
|
||||
case 403: // Invalid input value
|
||||
return utils.BadParametersError("")
|
||||
case 3301: // already exists
|
||||
return utils.AlreadyExistError("")
|
||||
case 3309:
|
||||
return utils.ShareReachMaxCountError("")
|
||||
case 3328:
|
||||
return utils.ShareSystemBusyError("")
|
||||
}
|
||||
|
||||
if errCode >= 3300 {
|
||||
return utils.ShareDefaultError{errCode}
|
||||
}
|
||||
return oriErr
|
||||
}
|
||||
|
||||
// ----------------------- Share APIs -----------------------
|
||||
func (dsm *DSM) ShareGet(shareName string) (ShareInfo, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "get")
|
||||
params.Add("version", "1")
|
||||
params.Add("additional", "[\"encryption\", \"enable_share_cow\", \"recyclebin\", \"support_snapshot\", \"share_quota\"]")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
|
||||
info := ShareInfo{}
|
||||
|
||||
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
|
||||
|
||||
return info, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
Name string `json:"name"`
|
||||
ShareInfo ShareInfo `json:"shareinfo"`
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareList() ([]ShareInfo, error) {
|
||||
@@ -117,7 +29,7 @@ func (dsm *DSM) ShareList() ([]ShareInfo, error) {
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "list")
|
||||
params.Add("version", "1")
|
||||
params.Add("additional", "[\"encryption\", \"enable_share_cow\", \"recyclebin\", \"support_snapshot\", \"share_quota\"]")
|
||||
params.Add("additional", "[\"encryption\"]")
|
||||
|
||||
type ShareInfos struct {
|
||||
Shares []ShareInfo `json:"shares"`
|
||||
@@ -125,7 +37,7 @@ func (dsm *DSM) ShareList() ([]ShareInfo, error) {
|
||||
|
||||
resp, err := dsm.sendRequest("", &ShareInfos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
infos, ok := resp.Data.(*ShareInfos)
|
||||
@@ -149,49 +61,13 @@ func (dsm *DSM) ShareCreate(spec ShareCreateSpec) error {
|
||||
}
|
||||
params.Add("shareinfo", string(js))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareClone(spec ShareCloneSpec) (string, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "clone")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(spec.Name))
|
||||
|
||||
// if clone from snapshot, the NameOrg must be the parent of the snapshot, or the webapi will return 3300
|
||||
// if the snapshot doesn't exist, it will return 3300 too.
|
||||
if spec.ShareInfo.NameOrg == "" {
|
||||
return "", fmt.Errorf("Clone failed. The source name can't be empty.")
|
||||
}
|
||||
|
||||
if spec.Snapshot != "" {
|
||||
params.Add("snapshot", strconv.Quote(spec.Snapshot))
|
||||
}
|
||||
|
||||
js, err := json.Marshal(spec.ShareInfo)
|
||||
// response : {"data":{"name":"Share-2"},"success":true}
|
||||
_, err = dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
params.Add("shareinfo", string(js))
|
||||
|
||||
type ShareCreateResp struct {
|
||||
Name string `json:"name"`
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &ShareCreateResp{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return "", shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
shareResp, ok := resp.Data.(*ShareCreateResp)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("Failed to assert response to %T", &ShareCreateResp{})
|
||||
}
|
||||
|
||||
return shareResp.Name, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareDelete(shareName string) error {
|
||||
@@ -199,165 +75,13 @@ func (dsm *DSM) ShareDelete(shareName string) error {
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "delete")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", fmt.Sprintf("[%s]", strconv.Quote(shareName)))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareSet(shareName string, updateInfo ShareUpdateInfo) error {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share")
|
||||
params.Add("method", "set")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
|
||||
js, err := json.Marshal(updateInfo)
|
||||
// response : {"success":true}
|
||||
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params.Add("shareinfo", string(js))
|
||||
|
||||
if logger.WebapiDebug {
|
||||
log.Debugln(params)
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) SetShareQuota(shareInfo ShareInfo, newSizeInMB int64) error {
|
||||
updateInfo := ShareUpdateInfo{
|
||||
Name: shareInfo.Name,
|
||||
VolPath: shareInfo.VolPath,
|
||||
QuotaForCreate: &newSizeInMB,
|
||||
}
|
||||
return dsm.ShareSet(shareInfo.Name, updateInfo)
|
||||
}
|
||||
|
||||
// ----------------------- Share Snapshot APIs -----------------------
|
||||
func (dsm *DSM) ShareSnapshotCreate(spec ShareSnapshotCreateSpec) (string, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Snapshot")
|
||||
params.Add("method", "create")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(spec.ShareName))
|
||||
|
||||
type SnapInfo struct {
|
||||
Desc string `json:"desc"`
|
||||
IsLocked bool `json:"lock"` // default true
|
||||
}
|
||||
|
||||
snapinfo := SnapInfo{
|
||||
Desc: spec.Desc,
|
||||
IsLocked: spec.IsLocked,
|
||||
}
|
||||
js, err := json.Marshal(snapinfo)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
params.Add("snapinfo", string(js))
|
||||
|
||||
var snapTime string
|
||||
resp, err := dsm.sendRequest("", &snapTime, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return "", shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
return snapTime, nil // "GMT+08-2022.01.14-19.18.29"
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareSnapshotList(name string) ([]ShareSnapshotInfo, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Snapshot")
|
||||
params.Add("method", "list")
|
||||
params.Add("version", "2")
|
||||
params.Add("name", strconv.Quote(name))
|
||||
params.Add("additional", "[\"desc\", \"lock\", \"schedule_snapshot\", \"ruuid\", \"snap_size\"]")
|
||||
|
||||
type Infos struct {
|
||||
Snapshots []ShareSnapshotInfo `json:"snapshots"`
|
||||
Total int `json:"total"`
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &Infos{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
infos, ok := resp.Data.(*Infos)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Failed to assert response to %T", &Infos{})
|
||||
}
|
||||
|
||||
return infos.Snapshots, nil
|
||||
}
|
||||
|
||||
func (dsm *DSM) ShareSnapshotDelete(snapTime string, shareName string) error {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Snapshot")
|
||||
params.Add("method", "delete")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
params.Add("snapshots", fmt.Sprintf("[%s]", strconv.Quote(snapTime))) // ["GMT+08-2022.01.14-19.18.29"]
|
||||
|
||||
var objmap []map[string]interface{}
|
||||
resp, err := dsm.sendRequest("", &objmap, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
if len(objmap) > 0 {
|
||||
return fmt.Errorf("Failed to delete snapshot, API common error. snapshot: %s", snapTime)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ----------------------- Share Permission APIs -----------------------
|
||||
func (dsm *DSM) SharePermissionSet(spec SharePermissionSetSpec) error {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Permission")
|
||||
params.Add("method", "set")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(spec.Name))
|
||||
params.Add("user_group_type", strconv.Quote(spec.UserGroupType))
|
||||
|
||||
js, err := json.Marshal(spec.Permissions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
params.Add("permissions", string(js))
|
||||
|
||||
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
|
||||
|
||||
return shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
func (dsm *DSM) SharePermissionList(shareName string, userGroupType string) ([]SharePermission, error) {
|
||||
params := url.Values{}
|
||||
params.Add("api", "SYNO.Core.Share.Permission")
|
||||
params.Add("method", "list")
|
||||
params.Add("version", "1")
|
||||
params.Add("name", strconv.Quote(shareName))
|
||||
params.Add("user_group_type", strconv.Quote(userGroupType))
|
||||
|
||||
type SharePermissions struct {
|
||||
Permissions []SharePermission `json:"items"`
|
||||
}
|
||||
|
||||
resp, err := dsm.sendRequest("", &SharePermissions{}, params, "webapi/entry.cgi")
|
||||
if err != nil {
|
||||
return nil, shareErrCodeMapping(resp.ErrorCode, err)
|
||||
}
|
||||
|
||||
infos, ok := resp.Data.(*SharePermissions)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("Failed to assert response to %T", &SharePermissions{})
|
||||
}
|
||||
|
||||
return infos.Permissions, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -16,15 +16,14 @@ type IDsmService interface {
|
||||
GetDsm(ip string) (*webapi.DSM, error)
|
||||
GetDsmsCount() int
|
||||
ListDsmVolumes(ip string) ([]webapi.VolInfo, error)
|
||||
CreateVolume(spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error)
|
||||
DeleteVolume(volId string) error
|
||||
ListVolumes() []*models.K8sVolumeRespSpec
|
||||
GetVolume(volId string) *models.K8sVolumeRespSpec
|
||||
ExpandVolume(volId string, newSize int64) (*models.K8sVolumeRespSpec, error)
|
||||
CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (*models.K8sSnapshotRespSpec, error)
|
||||
CreateVolume(spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, string, error)
|
||||
DeleteVolume(volumeId string) error
|
||||
ListVolumes() []*models.ListK8sVolumeRespSpec
|
||||
GetVolume(lunUuid string) *models.ListK8sVolumeRespSpec
|
||||
ExpandLun(lunUuid string, newSize int64) error
|
||||
CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (string, error)
|
||||
DeleteSnapshot(snapshotUuid string) error
|
||||
ListAllSnapshots() []*models.K8sSnapshotRespSpec
|
||||
ListSnapshots(volId string) []*models.K8sSnapshotRespSpec
|
||||
GetVolumeByName(volName string) *models.K8sVolumeRespSpec
|
||||
GetSnapshotByName(snapshotName string) *models.K8sSnapshotRespSpec
|
||||
ListAllSnapshots() ([]webapi.SnapshotInfo, error)
|
||||
ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo, error)
|
||||
GetSnapshot(snapshotUuid string) (webapi.SnapshotInfo, error)
|
||||
}
|
||||
@@ -2,10 +2,6 @@
|
||||
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
K8sCsiName = "Kubernetes CSI"
|
||||
|
||||
@@ -19,30 +15,8 @@ const (
|
||||
LunTypeBlunThick = "BLUN_THICK" // thick provision, mapped to type 259
|
||||
MaxIqnLen = 128
|
||||
|
||||
// Share definitions
|
||||
MaxShareLen = 32
|
||||
MaxShareDescLen = 64
|
||||
UserGroupTypeLocalUser = "local_user"
|
||||
UserGroupTypeLocalGroup = "local_group"
|
||||
UserGroupTypeSystem = "system"
|
||||
|
||||
|
||||
// CSI definitions
|
||||
TargetPrefix = "k8s-csi"
|
||||
LunPrefix = "k8s-csi"
|
||||
IqnPrefix = "iqn.2000-01.com.synology:"
|
||||
SharePrefix = "k8s-csi"
|
||||
ShareSnapshotDescPrefix = "(Do not change)"
|
||||
)
|
||||
|
||||
func GenLunName(volName string) string {
|
||||
return fmt.Sprintf("%s-%s", LunPrefix, volName)
|
||||
}
|
||||
|
||||
func GenShareName(volName string) string {
|
||||
shareName := fmt.Sprintf("%s-%s", SharePrefix, volName)
|
||||
if len(shareName) > MaxShareLen {
|
||||
return shareName[:MaxShareLen]
|
||||
}
|
||||
return shareName
|
||||
}
|
||||
TargetPrefix = "k8s-csi"
|
||||
LunPrefix = "k8s-csi"
|
||||
IqnPrefix = "iqn.2000-01.com.synology:"
|
||||
)
|
||||
@@ -3,8 +3,6 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
)
|
||||
|
||||
@@ -12,43 +10,21 @@ type CreateK8sVolumeSpec struct {
|
||||
DsmIp string
|
||||
K8sVolumeName string
|
||||
LunName string
|
||||
ShareName string
|
||||
Location string
|
||||
Size int64
|
||||
Type string
|
||||
ThinProvisioning bool
|
||||
TargetName string
|
||||
TargetIqn string
|
||||
MultipleSession bool
|
||||
SourceSnapshotId string
|
||||
SourceVolumeId string
|
||||
Protocol string
|
||||
}
|
||||
|
||||
type K8sVolumeRespSpec struct {
|
||||
DsmIp string
|
||||
VolumeId string
|
||||
SizeInBytes int64
|
||||
Location string
|
||||
Name string
|
||||
Source string
|
||||
Lun webapi.LunInfo
|
||||
Target webapi.TargetInfo
|
||||
Share webapi.ShareInfo
|
||||
Protocol string
|
||||
}
|
||||
|
||||
type K8sSnapshotRespSpec struct {
|
||||
DsmIp string
|
||||
Name string
|
||||
Uuid string
|
||||
ParentName string
|
||||
ParentUuid string
|
||||
Status string
|
||||
SizeInBytes int64
|
||||
CreateTime int64
|
||||
Time string // only for share snapshot delete
|
||||
RootPath string
|
||||
Protocol string
|
||||
type ListK8sVolumeRespSpec struct {
|
||||
DsmIp string
|
||||
Lun webapi.LunInfo
|
||||
Target webapi.TargetInfo
|
||||
}
|
||||
|
||||
type CreateK8sVolumeSnapshotSpec struct {
|
||||
@@ -57,24 +33,4 @@ type CreateK8sVolumeSnapshotSpec struct {
|
||||
Description string
|
||||
TakenBy string
|
||||
IsLocked bool
|
||||
}
|
||||
|
||||
type NodeStageVolumeSpec struct {
|
||||
VolumeId string
|
||||
StagingTargetPath string
|
||||
VolumeCapability *csi.VolumeCapability
|
||||
Dsm string
|
||||
Source string
|
||||
}
|
||||
|
||||
type ByVolumeId []*K8sVolumeRespSpec
|
||||
func (a ByVolumeId) Len() int { return len(a) }
|
||||
func (a ByVolumeId) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a ByVolumeId) Less(i, j int) bool { return a[i].VolumeId < a[j].VolumeId }
|
||||
|
||||
type BySnapshotAndParentUuid []*K8sSnapshotRespSpec
|
||||
func (a BySnapshotAndParentUuid) Len() int { return len(a) }
|
||||
func (a BySnapshotAndParentUuid) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
func (a BySnapshotAndParentUuid) Less(i, j int) bool {
|
||||
return fmt.Sprintf("%s/%s", a[i].ParentUuid, a[i].Uuid) < fmt.Sprintf("%s/%s", a[j].ParentUuid, a[j].Uuid)
|
||||
}
|
||||
}
|
||||
@@ -2,28 +2,14 @@
|
||||
|
||||
package utils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type OutOfFreeSpaceError string
|
||||
type AlreadyExistError string
|
||||
type BadParametersError string
|
||||
type NoSuchLunError string
|
||||
type LunReachMaxCountError string
|
||||
type TargetReachMaxCountError string
|
||||
type NoSuchSnapshotError string
|
||||
type BadLunTypeError string
|
||||
type SnapshotReachMaxCountError string
|
||||
type IscsiDefaultError struct {
|
||||
ErrCode int
|
||||
}
|
||||
type NoSuchShareError string
|
||||
type ShareReachMaxCountError string
|
||||
type ShareSystemBusyError string
|
||||
type ShareDefaultError struct {
|
||||
ErrCode int
|
||||
}
|
||||
type IscsiDefaultError string
|
||||
|
||||
func (_ OutOfFreeSpaceError) Error() string {
|
||||
return "Out of free space"
|
||||
@@ -31,14 +17,6 @@ func (_ OutOfFreeSpaceError) Error() string {
|
||||
func (_ AlreadyExistError) Error() string {
|
||||
return "Already Existed"
|
||||
}
|
||||
func (_ BadParametersError) Error() string {
|
||||
return "Invalid input value"
|
||||
}
|
||||
|
||||
// ISCSI errors
|
||||
func (_ NoSuchLunError) Error() string {
|
||||
return "No such LUN"
|
||||
}
|
||||
|
||||
func (_ LunReachMaxCountError) Error() string {
|
||||
return "Number of LUN reach limit"
|
||||
@@ -60,23 +38,6 @@ func (_ SnapshotReachMaxCountError) Error() string {
|
||||
return "Number of snapshot reach limit"
|
||||
}
|
||||
|
||||
func (e IscsiDefaultError) Error() string {
|
||||
return fmt.Sprintf("ISCSI API error. Error code: %d", e.ErrCode)
|
||||
}
|
||||
|
||||
// Share errors
|
||||
func (_ NoSuchShareError) Error() string {
|
||||
return "No such share"
|
||||
}
|
||||
|
||||
func (_ ShareReachMaxCountError) Error() string {
|
||||
return "Number of share reach limit"
|
||||
}
|
||||
|
||||
func (_ ShareSystemBusyError) Error() string {
|
||||
return "Share system is temporary busy"
|
||||
}
|
||||
|
||||
func (e ShareDefaultError) Error() string {
|
||||
return fmt.Sprintf("Share API error. Error code: %d", e.ErrCode)
|
||||
}
|
||||
func (_ IscsiDefaultError) Error() string {
|
||||
return "Default ISCSI error"
|
||||
}
|
||||
@@ -8,42 +8,7 @@ import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// AuthType enumerates the access levels used for share permissions.
type AuthType string

const (
	UNIT_GB = 1024 * 1024 * 1024
	UNIT_MB = 1024 * 1024

	// Supported volume protocols; iSCSI is the default.
	ProtocolSmb     = "smb"
	ProtocolIscsi   = "iscsi"
	ProtocolDefault = ProtocolIscsi

	AuthTypeReadWrite AuthType = "rw"
	AuthTypeReadOnly  AuthType = "ro"
	AuthTypeNoAccess  AuthType = "no"
)

// SliceContains reports whether s is present in items.
func SliceContains(items []string, s string) bool {
	for idx := range items {
		if items[idx] == s {
			return true
		}
	}
	return false
}

// MBToBytes converts a size in megabytes (MiB) to bytes.
func MBToBytes(size int64) int64 {
	return UNIT_MB * size
}

// BytesToMB converts a size in bytes to whole megabytes, rounding down.
func BytesToMB(size int64) int64 {
	return size / UNIT_MB
}

// BytesToMBCeil converts a size in bytes to megabytes, rounding up.
func BytesToMBCeil(size int64) int64 {
	return (size + UNIT_MB - 1) / UNIT_MB
}
|
||||
const UNIT_GB = 1024 * 1024 * 1024
|
||||
|
||||
func StringToBoolean(value string) bool {
|
||||
value = strings.ToLower(value)
|
||||
@@ -65,4 +30,4 @@ func LookupIPv4(name string) ([]string, error) {
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("Failed to LookupIPv4 by local resolver for: %s", name)
|
||||
}
|
||||
}
|
||||
@@ -1,39 +1,12 @@
|
||||
#!/bin/bash
|
||||
plugin_name="csi.san.synology.com"
|
||||
min_support_minor=19
|
||||
max_support_minor=20
|
||||
deploy_k8s_version="v1".$min_support_minor
|
||||
deploy_k8s_version="v1.19"
|
||||
|
||||
SCRIPT_PATH="$(realpath "$0")"
|
||||
SOURCE_PATH="$(realpath "$(dirname "${SCRIPT_PATH}")"/../)"
|
||||
config_file="${SOURCE_PATH}/config/client-info.yml"
|
||||
plugin_dir="/var/lib/kubelet/plugins/$plugin_name"
|
||||
|
||||
# parse_version: detect the Kubernetes server minor version via kubectl and
# set deploy_k8s_version to the matching manifest directory, clamped to the
# supported [v1.$min_support_minor, v1.$max_support_minor] range.
# Exits with status 1 when the server major version is not 1.
parse_version(){
    ver=$(kubectl version --short | grep Server | awk '{print $3}')
    major=$(echo "${ver##*v}" | cut -d'.' -f1)
    minor=$(echo "${ver##*v}" | cut -d'.' -f2)

    if [[ "$major" != 1 ]]; then
        echo "Version not supported: $ver"
        exit 1
    fi

    # Use the declared support bounds instead of hard-coded case labels so
    # the supported range only has to be updated in one place.
    if [[ $minor -lt $min_support_minor ]]; then
        deploy_k8s_version="v1".$min_support_minor
    elif [[ $minor -gt $max_support_minor ]]; then
        deploy_k8s_version="v1".$max_support_minor
    else
        deploy_k8s_version="v1".$minor
    fi
    echo "Deploy Version: $deploy_k8s_version"
}
|
||||
|
||||
# 1. Build
|
||||
csi_build(){
|
||||
echo "==== Build synology-csi .... ===="
|
||||
@@ -43,7 +16,6 @@ csi_build(){
|
||||
# 2. Install
|
||||
csi_install(){
|
||||
echo "==== Creates namespace and secrets, then installs synology-csi ===="
|
||||
parse_version
|
||||
|
||||
kubectl create ns synology-csi
|
||||
kubectl create secret -n synology-csi generic client-info-secret --from-file="$config_file"
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/common"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
)
|
||||
|
||||
@@ -52,30 +51,6 @@ var cmdDsmLogin = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
// Always get the first client from ClientInfo for synocli testing
|
||||
func LoginDsmForTest() (*webapi.DSM, error) {
|
||||
info, err := common.LoadConfig("./config/client-info.yml")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to read config: %v", err)
|
||||
}
|
||||
if len(info.Clients) == 0 {
|
||||
return nil, fmt.Errorf("No client in client-info.yml")
|
||||
}
|
||||
|
||||
dsm := &webapi.DSM{
|
||||
Ip: info.Clients[0].Host,
|
||||
Port: info.Clients[0].Port,
|
||||
Username: info.Clients[0].Username,
|
||||
Password: info.Clients[0].Password,
|
||||
Https: info.Clients[0].Https,
|
||||
}
|
||||
|
||||
if err := dsm.Login(); err != nil {
|
||||
return nil, fmt.Errorf("Failed to login to DSM: [%s]. err: %v", dsm.Ip, err)
|
||||
}
|
||||
return dsm, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
cmdDsm.AddCommand(cmdDsmLogin)
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@ var rootCmd = &cobra.Command{
|
||||
|
||||
func init() {
|
||||
rootCmd.AddCommand(cmdDsm)
|
||||
rootCmd.AddCommand(cmdShare)
|
||||
}
|
||||
|
||||
func Execute() {
|
||||
|
||||
@@ -1,580 +0,0 @@
|
||||
/*
|
||||
* Copyright 2022 Synology Inc.
|
||||
*/
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
|
||||
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
|
||||
"strconv"
|
||||
|
||||
"text/tabwriter"
|
||||
)
|
||||
|
||||
var cmdShare = &cobra.Command{
|
||||
Use: "share",
|
||||
Short: "share API",
|
||||
Long: `DSM share API`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
cmd.Help()
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareGet = &cobra.Command{
|
||||
Use: "get <name>",
|
||||
Short: "get share",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
share, err := dsm.ShareGet(args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Success, ShareGet(%s) = %#v\n", args[0], share)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareList = &cobra.Command{
|
||||
Use: "list",
|
||||
Short: "list shares",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "%s\t%-32s\t%-10s\t%-64s\t%s\t%s\t%s\t%s\t%-10s\t%s\t%s\n",
|
||||
"id:", "Name:", "VolPath", "Desc:", "Quota(MB):", "ShareCow:", "RecycleBin:", "RBAdminOnly:", "Encryption:", "CanSnap:", "Uuid:")
|
||||
for i, info := range shares {
|
||||
fmt.Fprintf(tw, "%d\t%-32s\t", i, info.Name)
|
||||
fmt.Fprintf(tw, "%-10s\t", info.VolPath)
|
||||
fmt.Fprintf(tw, "%-64s\t", info.Desc)
|
||||
fmt.Fprintf(tw, "%d\t", info.QuotaValueInMB)
|
||||
fmt.Fprintf(tw, "%v\t", info.EnableShareCow)
|
||||
fmt.Fprintf(tw, "%v\t", info.EnableRecycleBin)
|
||||
fmt.Fprintf(tw, "%v\t", info.RecycleBinAdminOnly)
|
||||
fmt.Fprintf(tw, "%-10d\t", info.Encryption)
|
||||
fmt.Fprintf(tw, "%v\t", info.SupportSnapshot)
|
||||
fmt.Fprintf(tw, "%s\t", info.Uuid)
|
||||
fmt.Fprintf(tw, "\n")
|
||||
|
||||
_ = tw.Flush()
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareList()\n")
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareCreate = &cobra.Command{
|
||||
Use: "create <name> <location> [<size_bytes>]",
|
||||
Short: "create share",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
var size int64 = 0
|
||||
if len(args) >= 3 {
|
||||
size, err = strconv.ParseInt(args[2], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
sizeInMB := utils.BytesToMBCeil(size)
|
||||
testSpec := webapi.ShareCreateSpec{
|
||||
Name: args[0],
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: args[0],
|
||||
VolPath: args[1],
|
||||
Desc: "Created by synocli",
|
||||
EnableShareCow: false,
|
||||
EnableRecycleBin: true,
|
||||
RecycleBinAdminOnly: true,
|
||||
Encryption: 0,
|
||||
QuotaForCreate: &sizeInMB,
|
||||
},
|
||||
}
|
||||
|
||||
fmt.Printf("spec = %#v, sizeInMB = %d\n", testSpec, sizeInMB)
|
||||
err = dsm.ShareCreate(testSpec)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
share, err := dsm.ShareGet(args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareCreate(%s) resp = %#v\n", args[0], share)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareDelete = &cobra.Command{
|
||||
Use: "delete <name>",
|
||||
Short: "delete share",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
err = dsm.ShareDelete(args[0])
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Success, ShareDelete(%s)\n", args[0])
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareClone = &cobra.Command{
|
||||
Use: "clone <new name> <src name> <is_snapshot [true|false]>",
|
||||
Short: "clone share",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
fromSnapshot := false
|
||||
if len(args) >= 3 && args[2] == "true" {
|
||||
fromSnapshot = true
|
||||
}
|
||||
|
||||
newName := args[0]
|
||||
srcName := args[1]
|
||||
orgShareName := ""
|
||||
snapshot := ""
|
||||
if fromSnapshot {
|
||||
snapshot = srcName
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
for _, share := range shares {
|
||||
snaps, err := dsm.ShareSnapshotList(share.Name)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
for _, snap := range snaps {
|
||||
if snap.Time != srcName {
|
||||
continue
|
||||
}
|
||||
orgShareName = share.Name
|
||||
break
|
||||
}
|
||||
if orgShareName != "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if orgShareName == "" {
|
||||
fmt.Println("Failed to find org Share of the snapshot")
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
orgShareName = srcName
|
||||
}
|
||||
srcShare, err := dsm.ShareGet(orgShareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
shareSpec := webapi.ShareCloneSpec{
|
||||
Name: newName,
|
||||
Snapshot: snapshot,
|
||||
ShareInfo: webapi.ShareInfo{
|
||||
Name: newName,
|
||||
VolPath: srcShare.VolPath,
|
||||
Desc: "Cloned from "+srcName+" by synocli",
|
||||
EnableRecycleBin: srcShare.EnableRecycleBin,
|
||||
RecycleBinAdminOnly: srcShare.RecycleBinAdminOnly,
|
||||
NameOrg: orgShareName,
|
||||
},
|
||||
}
|
||||
fmt.Printf("newName: %s, fromSnapshot: %v (%s), orgShareName: %s\n", newName, fromSnapshot, snapshot, orgShareName)
|
||||
_, err = dsm.ShareClone(shareSpec)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
share, err := dsm.ShareGet(newName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareClone(%s, %s) new share = %#v\n", newName, srcName, share)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareSnapshotCreate = &cobra.Command{
|
||||
Use: "snap_create <src name> <desc> <is_lock [true|false]>",
|
||||
Short: "create share snapshot (only share located in btrfs volume can take snapshots)",
|
||||
Args: cobra.MinimumNArgs(3),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
spec := webapi.ShareSnapshotCreateSpec{
|
||||
ShareName: args[0],
|
||||
Desc: args[1],
|
||||
IsLocked: utils.StringToBoolean(args[2]),
|
||||
}
|
||||
|
||||
fmt.Printf("spec = %#v\n", spec)
|
||||
snapTime, err := dsm.ShareSnapshotCreate(spec)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("resp = %s\n", snapTime)
|
||||
|
||||
snaps, err := dsm.ShareSnapshotList(spec.ShareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
var snapshot webapi.ShareSnapshotInfo
|
||||
for _, snap := range snaps {
|
||||
if snap.Time == snapTime {
|
||||
snapshot = snap
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("Success, ShareSnapshotCreate(%s), snap = %#v\n", args[0], snapshot)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareSnapshotDelete = &cobra.Command{
|
||||
Use: "snap_delete <share name> <snapshot time>",
|
||||
Short: "delete share snapshot",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
if err := dsm.ShareSnapshotDelete(args[1], args[0]); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("Success, ShareSnapshotDelete(%s, %s)\n", args[1], args[0])
|
||||
},
|
||||
}
|
||||
|
||||
func shareSnapshotListAll(dsm *webapi.DSM, shareName string) ([]webapi.ShareSnapshotInfo, error) {
|
||||
if shareName != "" {
|
||||
return dsm.ShareSnapshotList(shareName)
|
||||
}
|
||||
|
||||
var infos []webapi.ShareSnapshotInfo
|
||||
shares, err := dsm.ShareList()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, share := range shares {
|
||||
snaps, err := dsm.ShareSnapshotList(share.Name)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
infos = append(infos, snaps...)
|
||||
}
|
||||
|
||||
return infos, nil
|
||||
}
|
||||
|
||||
var cmdShareSnapshotList = &cobra.Command{
|
||||
Use: "snap_list [<share name>]",
|
||||
Short: "list share snapshots",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shareName := ""
|
||||
if len(args) >= 1 {
|
||||
shareName = args[0]
|
||||
}
|
||||
|
||||
snaps, err := shareSnapshotListAll(dsm, shareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "id:\tTime:\tDesc:\tLock:\tScheduleSnapshot:\n")
|
||||
for i, info := range snaps {
|
||||
fmt.Fprintf(tw, "%d\t%-32s\t", i, info.Time)
|
||||
fmt.Fprintf(tw, "%-32s\t", info.Desc)
|
||||
fmt.Fprintf(tw, "%v\t", info.Lock)
|
||||
fmt.Fprintf(tw, "%v\t", info.ScheduleSnapshot)
|
||||
fmt.Fprintf(tw, "\n")
|
||||
|
||||
_ = tw.Flush()
|
||||
}
|
||||
fmt.Printf("Success, ShareSnapList(%s)\n", shareName)
|
||||
},
|
||||
}
|
||||
|
||||
func createSharePermission(name string, acl string) *webapi.SharePermission {
|
||||
var permission webapi.SharePermission
|
||||
permission.Name = name
|
||||
|
||||
switch strings.ToLower(acl) {
|
||||
case "rw":
|
||||
permission.IsWritable = true
|
||||
case "ro":
|
||||
permission.IsReadonly = true
|
||||
case "no":
|
||||
permission.IsDeny = true
|
||||
default:
|
||||
fmt.Println("[ERROR] Unknown ACL")
|
||||
return nil
|
||||
}
|
||||
return &permission
|
||||
}
|
||||
|
||||
func getShareLocalUserPermission(dsm *webapi.DSM, shareName string, userName string) (*webapi.SharePermission, error) {
|
||||
infos, err := dsm.SharePermissionList(shareName, "local_user")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, info := range infos {
|
||||
if info.Name == userName {
|
||||
return &info, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("Permission Not Found.")
|
||||
}
|
||||
|
||||
var cmdSharePermissionList = &cobra.Command{
|
||||
Use: "permission_list <name> [local_user|local_group|system]",
|
||||
Short: "list permissions",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
userGroupType := "local_user"
|
||||
if len(args) >= 2 {
|
||||
userGroupType = args[1]
|
||||
}
|
||||
|
||||
infos, err := dsm.SharePermissionList(args[0], userGroupType)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
|
||||
|
||||
fmt.Fprintf(tw, "%s\t%-20s\t%-10s\t%-12s\t%-12s\t%-10s\t%-10s\n", "id:", "Name:", "IsAdmin", "IsReadonly:", "IsWritable:", "IsDeny:", "IsCustom:")
|
||||
for i, info := range infos {
|
||||
fmt.Fprintf(tw, "%d\t", i)
|
||||
fmt.Fprintf(tw, "%-20s\t", info.Name)
|
||||
fmt.Fprintf(tw, "%-10v\t", info.IsAdmin)
|
||||
fmt.Fprintf(tw, "%-12v\t", info.IsReadonly)
|
||||
fmt.Fprintf(tw, "%-12v\t", info.IsWritable)
|
||||
fmt.Fprintf(tw, "%-10v\t", info.IsDeny)
|
||||
fmt.Fprintf(tw, "%-10v\t", info.IsCustom)
|
||||
fmt.Fprintf(tw, "\n")
|
||||
|
||||
_ = tw.Flush()
|
||||
}
|
||||
|
||||
fmt.Printf("Success, SharePermissionList(%s)\n", args[0])
|
||||
},
|
||||
}
|
||||
|
||||
var cmdSharePermissionSet = &cobra.Command{
|
||||
Use: "permission_set <share name> <username> <[rw|ro|no]>",
|
||||
Short: "set permission",
|
||||
Args: cobra.MinimumNArgs(3),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shareName := args[0]
|
||||
userName := args[1]
|
||||
userGroupType := "local_user"
|
||||
permission := createSharePermission(userName, args[2])
|
||||
if permission == nil {
|
||||
fmt.Println("Failed. Invalid Argument.")
|
||||
os.Exit(1)
|
||||
}
|
||||
permissions := append([]*webapi.SharePermission{}, permission)
|
||||
|
||||
spec := webapi.SharePermissionSetSpec{
|
||||
Name: shareName,
|
||||
UserGroupType: userGroupType,
|
||||
Permissions: permissions,
|
||||
}
|
||||
|
||||
fmt.Printf("spec = %#v\n", spec)
|
||||
if err := dsm.SharePermissionSet(spec); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
newPermission, err := getShareLocalUserPermission(dsm, shareName, userName)
|
||||
if err != nil {
|
||||
fmt.Printf("Failed to get share local_user permission(%s, %s): %v\n", shareName, userName, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
fmt.Printf("Success, SharePermissionSet(%s), newPermission: %#v\n", shareName, newPermission)
|
||||
},
|
||||
}
|
||||
|
||||
var cmdShareSet = &cobra.Command{
|
||||
Use: "set <share name> <new size>",
|
||||
Short: "share set (only share located in btrfs volume can be resized)",
|
||||
Args: cobra.MinimumNArgs(2),
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
dsm, err := LoginDsmForTest()
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
defer func() {
|
||||
dsm.Logout()
|
||||
}()
|
||||
|
||||
shareName := args[0]
|
||||
newSize, err := strconv.ParseInt(args[1], 10, 64)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
share, err := dsm.ShareGet(shareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("old = %#v\n", share)
|
||||
|
||||
newSizeInMB := utils.BytesToMBCeil(newSize)
|
||||
updateInfo := webapi.ShareUpdateInfo{
|
||||
Name: share.Name,
|
||||
VolPath: share.VolPath,
|
||||
QuotaForCreate: &newSizeInMB,
|
||||
}
|
||||
if err := dsm.ShareSet(shareName, updateInfo); err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
newShare, err := dsm.ShareGet(shareName)
|
||||
if err != nil {
|
||||
fmt.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
fmt.Printf("new = %#v\n", newShare)
|
||||
|
||||
fmt.Printf("Success, ShareSet(%s), quota: %v -> %v MB\n", shareName, share.QuotaValueInMB, newShare.QuotaValueInMB)
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
func init() {
|
||||
cmdShare.AddCommand(cmdShareGet)
|
||||
cmdShare.AddCommand(cmdShareCreate)
|
||||
cmdShare.AddCommand(cmdShareDelete)
|
||||
cmdShare.AddCommand(cmdShareList)
|
||||
cmdShare.AddCommand(cmdShareClone)
|
||||
cmdShare.AddCommand(cmdShareSnapshotCreate)
|
||||
cmdShare.AddCommand(cmdShareSnapshotDelete)
|
||||
cmdShare.AddCommand(cmdShareSnapshotList)
|
||||
|
||||
cmdShare.AddCommand(cmdSharePermissionList)
|
||||
cmdShare.AddCommand(cmdSharePermissionSet)
|
||||
cmdShare.AddCommand(cmdShareSet)
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
# CreateVolumeSecret:
|
||||
# DeleteVolumeSecret:
|
||||
# ControllerPublishVolumeSecret:
|
||||
# ControllerUnpublishVolumeSecret:
|
||||
# ControllerValidateVolumeCapabilitiesSecret:
|
||||
NodeStageVolumeSecret:
|
||||
username: "username"
|
||||
password: "password"
|
||||
# NodePublishVolumeSecret:
|
||||
# CreateSnapshotSecret:
|
||||
# DeleteSnapshotSecret:
|
||||
# ControllerExpandVolumeSecret:
|
||||
@@ -15,7 +15,6 @@ import (
|
||||
|
||||
const (
|
||||
ConfigPath = "./../../config/client-info.yml"
|
||||
SecretsFilePath = "./sanity-test-secret-file.yaml"
|
||||
)
|
||||
|
||||
func TestSanity(t *testing.T) {
|
||||
@@ -70,15 +69,6 @@ func TestSanity(t *testing.T) {
|
||||
testConfig.TargetPath = targetPath
|
||||
testConfig.StagingPath = stagingPath
|
||||
testConfig.Address = endpoint
|
||||
testConfig.SecretsFile = SecretsFilePath
|
||||
|
||||
// Set Input parameters for test
|
||||
testConfig.TestVolumeParameters = map[string]string{
|
||||
"protocol": "smb",
|
||||
}
|
||||
|
||||
// testConfig.TestVolumeAccessType = "block" // raw block
|
||||
|
||||
// Run test
|
||||
sanity.Test(t, testConfig)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user