33 Commits

Author SHA1 Message Date
CALIN Cristian Andrei
f1b069f8a8 reneable healthcheck for csi-node-driver 2022-05-17 10:41:34 +03:00
CALIN Cristian Andrei
4dd2a104d8 [kube1.23] disable health checking csi-node-driver-registrar due to bug in v2.5.0 but missing v2.5.1 image 2022-05-14 10:25:53 +03:00
CALIN Cristian Andrei
6037fb4267 [kube1.23] add health checking to csi-node-driver-registrar 2022-05-14 10:23:16 +03:00
CALIN Cristian Andrei
36a72ef722 [kube1.23] k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.1 has issues with pulling 2022-05-14 10:18:33 +03:00
CALIN Cristian Andrei
646075d656 [kube1.23] update resizer and node-driver-registrar images 2022-05-14 10:15:14 +03:00
CALIN Cristian Andrei
2b8bb9323c [sync-upstream] align with v1.1.0 from upstream 2022-05-14 10:07:34 +03:00
CALIN Cristian Andrei
6708d8e0fc [kube1.23] fix csi snapshotter 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
1884d64aff fallback on fsGroupPolicy 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
5fcd762126 [kube1.23] update some images 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
eea3e812bf [kube1.23] sync with release-v1.0.1 upstream 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
77a52798d2 [v1.23] add support for kubernetes 1.23 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
545c3e3a06 upgrade attacher to 3.4.0 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
30d4700e0f update csi images to gcr.io images 2022-05-14 10:02:52 +03:00
CALIN Cristian Andrei
6e864c36c5 update snapshotter
* fix snapshotter port
* update snapshotter rbac
2022-05-14 10:02:40 +03:00
CALIN Cristian Andrei
e2f5e18dcf add leader election to attacher container 2022-05-14 10:01:43 +03:00
CALIN Cristian Andrei
65aef64e18 adjust logging and disable capacity for now 2022-05-14 10:01:43 +03:00
CALIN Cristian Andrei
2c1f01f6cd fix capacity owner ref 2022-05-14 10:01:43 +03:00
CALIN Cristian Andrei
50481366ba grant privilege to get statefullsets 2022-05-14 10:01:43 +03:00
CALIN Cristian Andrei
796c41f934 fix pod namespace env var 2022-05-14 10:01:43 +03:00
CALIN Cristian Andrei
d95ee2a07d fix port overlaps and add healthchecks 2022-05-14 10:01:35 +03:00
CALIN Cristian Andrei
fcc1f63cea update to latest csi plugins 2022-05-14 10:01:12 +03:00
CALIN Cristian Andrei
8333855c2a make default storage class
* ensure snapshots are retained for backup reasons
2022-05-14 10:01:02 +03:00
CALIN Cristian Andrei
3de947a094 use my own image 2022-05-14 09:58:17 +03:00
CALIN Cristian Andrei
313bb0e7b7 support kubernetes 1.22 2022-05-14 09:58:17 +03:00
CALIN Cristian Andrei
32ebdd9ac1 build with golang 1.16 2022-05-14 09:58:12 +03:00
chihyuwu
fc3359223f fix #34 - default to iSCSI for backward compatibility 2022-04-29 05:52:12 +00:00
chihyuwu
ebe7c1d97c Update to version 1.1.0 2022-04-25 11:10:19 +00:00
Chih Yu Wu
f0066b40ca Merge pull request #15 from ressu/fix-unstage
Logout iSCSI target on Unstage
2022-02-23 14:30:00 +08:00
chihyuwu
f4cbfc8392 fix #27 - use imagePullPolicy: IfNotPresent for csi driver 2022-02-16 06:51:07 +00:00
chihyuwu
515bc7f0ed Update to version 1.0.1 2022-02-14 07:56:51 +00:00
Synology Open Source
bca0c705f0 Merge pull request #24 from jeanfabrice/patch-1
add btrfs-progs package
2022-02-08 18:53:22 +08:00
Jean-Fabrice Bobo
228cbaa0ff add btrfs-progs package
to fix support of `fsType: btrfs`  storageClass parameters
2022-02-06 02:32:18 +01:00
Sami Haahtinen
7ba1bd8faa Logout of iSCSI target on Unstage
This cleans up the logic since Unstage is always called when node is
no longer using the volume. We don't need to have odd checks in
UnpublishVolume call.
2021-12-28 13:34:53 +00:00
42 changed files with 3340 additions and 476 deletions

2
.gitignore vendored
View File

@@ -1 +1,3 @@
bin/
config/client-info.yml
test/sanity/sanity-test-secret-file.yaml

View File

@@ -12,16 +12,20 @@ RUN go mod download
COPY Makefile .
ARG TARGETPLATFORM
COPY main.go .
COPY pkg ./pkg
RUN make
RUN env GOARCH=$(echo "$TARGETPLATFORM" | cut -f2 -d/) \
GOARM=$(echo "$TARGETPLATFORM" | cut -f3 -d/ | cut -c2-) \
make
############## Final stage ##############
FROM alpine:latest
LABEL maintainers="Synology Authors" \
description="Synology CSI Plugin"
RUN apk add --no-cache e2fsprogs e2fsprogs-extra xfsprogs xfsprogs-extra blkid util-linux iproute2 bash
RUN apk add --no-cache e2fsprogs e2fsprogs-extra xfsprogs xfsprogs-extra blkid util-linux iproute2 bash btrfs-progs ca-certificates cifs-utils
# Create symbolic link for chroot.sh
WORKDIR /

View File

@@ -2,12 +2,15 @@
REGISTRY_NAME=synology
IMAGE_NAME=synology-csi
IMAGE_VERSION=v1.0.0
IMAGE_VERSION=v1.1.0
IMAGE_TAG=$(REGISTRY_NAME)/$(IMAGE_NAME):$(IMAGE_VERSION)
# For now, only build linux/amd64 platform
GOARCH?=amd64
BUILD_ENV=CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH)
ifeq ($(GOARCH),)
GOARCH:=amd64
endif
GOARM?=""
BUILD_ENV=CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) GOARM=$(GOARM)
BUILD_FLAGS="-extldflags \"-static\""
.PHONY: all clean synology-csi-driver synocli test docker-build
@@ -21,11 +24,15 @@ synology-csi-driver:
docker-build:
docker build -f Dockerfile -t $(IMAGE_TAG) .
docker-build-multiarch:
docker buildx build -t $(IMAGE_TAG) --platform linux/amd64,linux/arm/v7,linux/arm64 . --push
synocli:
@mkdir -p bin
$(BUILD_ENV) go build -v -ldflags $(BUILD_FLAGS) -o ./bin/synocli ./synocli
test:
go clean -testcache
go test -v ./test/...
clean:
-rm -rf ./bin

View File

@@ -4,9 +4,11 @@ The official [Container Storage Interface](https://github.com/container-storage-
### Container Images & Kubernetes Compatibility
Driver Name: csi.san.synology.com
| Driver Version | Image | Supported K8s Version |
| -------------- | --------------------------------------------------------------------- | --------------------- |
| v1.0.0 | [synology-csi:v1.0.0](https://hub.docker.com/r/synology/synology-csi) | 1.19 |
| Driver Version | Image | Supported K8s Version |
| -------------------------------------------------------------------------------- | --------------------------------------------------------------------- | --------------------- |
| [v1.1.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.1.0) | [synology-csi:v1.1.0](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
| [v1.0.1](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.1) | [synology-csi:v1.0.1](https://hub.docker.com/r/synology/synology-csi) | 1.20+ |
| [v1.0.0](https://github.com/SynologyOpenSource/synology-csi/tree/release-v1.0.0) | [synology-csi:v1.0.0](https://hub.docker.com/r/synology/synology-csi) | 1.19 |
@@ -18,7 +20,7 @@ The Synology CSI driver supports:
## Installation
### Prerequisites
- Kubernetes versions 1.19
- Kubernetes versions 1.19 or above
- Synology NAS running DSM 7.0 or above
- Go version 1.16 or above is recommended
- (Optional) Both [Volume Snapshot CRDs](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0/client/config/crd) and the [common snapshot controller](https://github.com/kubernetes-csi/external-snapshotter/tree/v4.0.0/deploy/kubernetes/snapshot-controller) must be installed in your Kubernetes cluster if you want to use the **Snapshot** feature
@@ -98,6 +100,7 @@ Create and apply StorageClasses with the properties you want.
1. Create YAML files using the one at `deploy/kubernetes/<k8s version>/storage-class.yml` as the example, whose content is as below:
**iSCSI Protocol**
```
apiVersion: storage.k8s.io/v1
kind: StorageClass
@@ -113,18 +116,56 @@ Create and apply StorageClasses with the properties you want.
reclaimPolicy: Retain
allowVolumeExpansion: true
```
**SMB/CIFS Protocol**
Before creating an SMB/CIFS storage class, you must **create a secret** and specify the DSM user whom you want to give permissions to.
```
apiVersion: v1
kind: Secret
metadata:
name: cifs-csi-credentials
namespace: default
type: Opaque
stringData:
username: <username> # DSM user account accessing the shared folder
password: <password> # DSM user password accessing the shared folder
```
After creating the secret, create a storage class and fill the secret for node-stage-secret. This is a **required** step if you're using SMB, or there will be errors when staging volumes.
```
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: synostorage-smb
provisioner: csi.san.synology.com
parameters:
protocol: "smb"
dsm: '192.168.1.1'
location: '/volume1'
csi.storage.k8s.io/node-stage-secret-name: "cifs-csi-credentials"
csi.storage.k8s.io/node-stage-secret-namespace: "default"
reclaimPolicy: Delete
allowVolumeExpansion: true
```
2. Configure the StorageClass properties by assigning the parameters in the table. You can also leave blank if you don't have a preference:
| Name | Type | Description | Default |
| ---------- | ------ | ----------------------------------------------------------------------------------------------------------------- | ------- |
| *dsm* | string | The IPv4 address of your DSM, which must be included in the `client-info.yml` for the CSI driver to log in to DSM | - |
| *location* | string | The location (/volume1, /volume2, ...) on DSM where the LUN for *PersistentVolume* will be created | - |
| *fsType* | string | The formatting file system of the *PersistentVolumes* when you mount them on the pods | 'ext4' |
| Name | Type | Description | Default | Supported protocols |
| ------------------------------------------------ | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------- | ------------------- |
| *dsm* | string | The IPv4 address of your DSM, which must be included in the `client-info.yml` for the CSI driver to log in to DSM | - | iSCSI, SMB |
| *location* | string | The location (/volume1, /volume2, ...) on DSM where the LUN for *PersistentVolume* will be created | - | iSCSI, SMB |
| *fsType* | string | The formatting file system of the *PersistentVolumes* when you mount them on the pods. This parameter only works with iSCSI. For SMB, the fsType is always cifs. | 'ext4' | iSCSI |
| *protocol* | string | The backing storage protocol. Enter iscsi to create LUNs or smb to create shared folders on DSM. | 'iscsi' | iSCSI, SMB |
| *csi.storage.k8s.io/node-stage-secret-name* | string | The name of node-stage-secret. Required if DSM shared folder is accessed via SMB. | - | SMB |
| *csi.storage.k8s.io/node-stage-secret-namespace* | string | The namespace of node-stage-secret. Required if DSM shared folder is accessed via SMB. | - | SMB |
**Notice**
- If you leave the parameter *location* blank, the CSI driver will choose a volume on DSM with available storage to create the volumes.
- All volumes created by the CSI driver are Thin Provisioned LUNs on DSM. This will allow you to take snapshots of them.
- All iSCSI volumes created by the CSI driver are Thin Provisioned LUNs on DSM. This will allow you to take snapshots of them.
3. Apply the YAML files to the Kubernetes cluster.
@@ -153,10 +194,10 @@ Create and apply VolumeSnapshotClasses with the properties you want.
2. Configure volume snapshot class properties by assigning the following parameters, all parameters are optional:
| Name | Type | Description | Default |
| ------------- | ------ | -------------------------------------------- | ------- |
| *description* | string | The description of the snapshot on DSM | "" |
| *is_locked* | string | Whether you want to lock the snapshot on DSM | 'false' |
| Name | Type | Description | Default | Supported protocols |
| ------------- | ------ | -------------------------------------------- | ------- | ------------------- |
| *description* | string | The description of the snapshot on DSM | "" | iSCSI |
| *is_locked* | string | Whether you want to lock the snapshot on DSM | 'false' | iSCSI, SMB |
3. Apply the YAML files to the Kubernetes cluster.

View File

@@ -0,0 +1,20 @@
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: synology-smb-storage
provisioner: csi.san.synology.com
parameters:
protocol: "smb" # required for smb protocol
# Before creating an SMB storage class, you must create a secret and specify the DSM user whom you want to give permissions to.
csi.storage.k8s.io/node-stage-secret-name: "cifs-csi-credentials" # required for smb protocol
csi.storage.k8s.io/node-stage-secret-namespace: "default" # required for smb protocol
# dsm: "1.1.1.1"
# location: '/volume1'
# mountOptions:
# - dir_mode=0777
# - file_mode=0777
# - uid=0
# - gid=0
reclaimPolicy: Delete
allowVolumeExpansion: true

View File

@@ -25,6 +25,9 @@ rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
@@ -32,7 +35,7 @@ rules:
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
resources: ["volumeattachments", "volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
@@ -43,6 +46,9 @@ rules:
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
@@ -85,7 +91,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/k8scsi/csi-provisioner:v1.6.0
image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2
args:
- --timeout=60s
- --csi-address=$(ADDRESS)
@@ -103,7 +109,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/k8scsi/csi-attacher:v2.1.0
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
args:
- --v=5
- --csi-address=$(ADDRESS)
@@ -120,7 +126,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/k8scsi/csi-resizer:v0.5.0
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
args:
- --v=5
- --csi-address=$(ADDRESS)
@@ -137,7 +143,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: synology/synology-csi:v1.0.0
image: synology/synology-csi:v1.1.0
args:
- --nodeid=NotUsed
- --endpoint=$(CSI_ENDPOINT)

View File

@@ -63,7 +63,7 @@ spec:
securityContext:
privileged: true
imagePullPolicy: Always
image: quay.io/k8scsi/csi-node-driver-registrar:v1.2.0
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
args:
- --v=5
- --csi-address=$(ADDRESS) # the csi socket path inside the pod
@@ -86,7 +86,7 @@ spec:
securityContext:
privileged: true
imagePullPolicy: IfNotPresent
image: synology/synology-csi:v1.0.0
image: synology/synology-csi:v1.1.0
args:
- --nodeid=$(KUBE_NODE_NAME)
- --endpoint=$(CSI_ENDPOINT)

View File

@@ -64,7 +64,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: quay.io/k8scsi/csi-snapshotter:v3.0.3
image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.3
args:
- --v=5
- --csi-address=$(ADDRESS)
@@ -81,7 +81,7 @@ spec:
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: synology/synology-csi:v1.0.0
image: synology/synology-csi:v1.1.0
args:
- --nodeid=NotUsed
- --endpoint=$(CSI_ENDPOINT)

View File

@@ -7,5 +7,5 @@ metadata:
driver: csi.san.synology.com
deletionPolicy: Delete
# parameters:
# description: 'Kubernetes CSI'
# description: 'Kubernetes CSI' only for iscsi protocol
# is_locked: 'false'

View File

@@ -0,0 +1,168 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-controller-sa
namespace: synology-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-controller-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments", "volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-controller-role
namespace: synology-csi
subjects:
- kind: ServiceAccount
name: csi-controller-sa
namespace: synology-csi
roleRef:
kind: ClusterRole
name: synology-csi-controller-role
apiGroup: rbac.authorization.k8s.io
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: synology-csi-controller
namespace: synology-csi
spec:
serviceName: "synology-csi-controller"
replicas: 1
selector:
matchLabels:
app: synology-csi-controller
template:
metadata:
labels:
app: synology-csi-controller
spec:
serviceAccountName: csi-controller-sa
hostNetwork: true
containers:
- name: csi-provisioner
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
args:
- --timeout=60s
- --csi-address=$(ADDRESS)
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-attacher
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
args:
- --v=5
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-resizer
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-resizer:v1.3.0
args:
- --v=5
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-plugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: synology/synology-csi:v1.1.0
args:
- --nodeid=NotUsed
- --endpoint=$(CSI_ENDPOINT)
- --client-info
- /etc/synology/client-info.yml
- --log-level=info
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: client-info
mountPath: /etc/synology
readOnly: true
volumes:
- name: socket-dir
emptyDir: {}
- name: client-info
secret:
secretName: client-info-secret

View File

@@ -0,0 +1,9 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: csi.san.synology.com
spec:
attachRequired: true # Indicates the driver requires an attach operation (TODO: ControllerPublishVolume should be implemented)
podInfoOnMount: true
volumeLifecycleModes:
- Persistent

View File

@@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: synology-csi

View File

@@ -0,0 +1,139 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-node-sa
namespace: synology-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-node-role
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-node-role
namespace: synology-csi
subjects:
- kind: ServiceAccount
name: csi-node-sa
namespace: synology-csi
roleRef:
kind: ClusterRole
name: synology-csi-node-role
apiGroup: rbac.authorization.k8s.io
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: synology-csi-node
namespace: synology-csi
spec:
selector:
matchLabels:
app: synology-csi-node
template:
metadata:
labels:
app: synology-csi-node
spec:
serviceAccount: csi-node-sa
hostNetwork: true
containers:
- name: csi-driver-registrar
securityContext:
privileged: true
imagePullPolicy: Always
image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
args:
- --v=5
- --csi-address=$(ADDRESS) # the csi socket path inside the pod
- --kubelet-registration-path=$(REGISTRATION_PATH) # the csi socket path on the host node
env:
- name: ADDRESS
value: /csi/csi.sock
- name: REGISTRATION_PATH
value: /var/lib/kubelet/plugins/csi.san.synology.com/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: plugin-dir
mountPath: /csi
- name: registration-dir
mountPath: /registration
- name: csi-plugin
securityContext:
privileged: true
imagePullPolicy: IfNotPresent
image: synology/synology-csi:v1.1.0
args:
- --nodeid=$(KUBE_NODE_NAME)
- --endpoint=$(CSI_ENDPOINT)
- --client-info
- /etc/synology/client-info.yml
- --log-level=info
env:
- name: CSI_ENDPOINT
value: unix://csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: kubelet-dir
mountPath: /var/lib/kubelet
mountPropagation: "Bidirectional"
- name: plugin-dir
mountPath: /csi
- name: client-info
mountPath: /etc/synology
readOnly: true
- name: host-root
mountPath: /host
- name: device-dir
mountPath: /dev
volumes:
- name: kubelet-dir
hostPath:
path: /var/lib/kubelet
type: Directory
- name: plugin-dir
hostPath:
path: /var/lib/kubelet/plugins/csi.san.synology.com/
type: DirectoryOrCreate
- name: registration-dir
hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
- name: client-info
secret:
secretName: client-info-secret
- name: host-root
hostPath:
path: /
type: Directory
- name: device-dir
hostPath:
path: /dev
type: Directory

View File

@@ -0,0 +1,106 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-snapshotter-sa
namespace: synology-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-snapshotter-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-snapshotter-role
namespace: synology-csi
subjects:
- kind: ServiceAccount
name: csi-snapshotter-sa
namespace: synology-csi
roleRef:
kind: ClusterRole
name: synology-csi-snapshotter-role
apiGroup: rbac.authorization.k8s.io
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: synology-csi-snapshotter
namespace: synology-csi
spec:
serviceName: "synology-csi-snapshotter"
replicas: 1
selector:
matchLabels:
app: synology-csi-snapshotter
template:
metadata:
labels:
app: synology-csi-snapshotter
spec:
serviceAccountName: csi-snapshotter-sa
hostNetwork: true
containers:
- name: csi-snapshotter
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.2.1
args:
- --v=5
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: csi-plugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: synology/synology-csi:v1.1.0
args:
- --nodeid=NotUsed
- --endpoint=$(CSI_ENDPOINT)
- --client-info
- /etc/synology/client-info.yml
- --log-level=info
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: client-info
mountPath: /etc/synology
readOnly: true
volumes:
- name: socket-dir
emptyDir: {}
- name: client-info
secret:
secretName: client-info-secret

View File

@@ -0,0 +1,11 @@
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
name: synology-snapshotclass
annotations:
storageclass.kubernetes.io/is-default-class: "false"
driver: csi.san.synology.com
deletionPolicy: Delete
# parameters:
# description: 'Kubernetes CSI' only for iscsi protocol
# is_locked: 'false'

View File

@@ -0,0 +1,14 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: synology-iscsi-storage
# annotations:
# storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.san.synology.com
# if all params are empty, synology CSI will choose an available location to create volume
# parameters:
# dsm: '1.1.1.1'
# location: '/volume1'
# fsType: 'ext4'
reclaimPolicy: Retain
allowVolumeExpansion: true

View File

@@ -0,0 +1,281 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-controller-sa
namespace: synology-csi
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-controller-role
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["csi.storage.k8s.io"]
resources: ["csinodeinfos"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments", "volumeattachments/status"]
verbs: ["get", "list", "watch", "update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-controller-role
namespace: synology-csi
subjects:
- kind: ServiceAccount
name: csi-controller-sa
namespace: synology-csi
roleRef:
kind: ClusterRole
name: synology-csi-controller-role
apiGroup: rbac.authorization.k8s.io
---
# Provisioner must be able to work with endpoints in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: synology-csi
name: synology-provisioner-cfg
rules:
# Only one of the following rules for endpoints or leases is required based on
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
# Permissions for CSIStorageCapacity are only needed enabling the publishing
# of storage capacity information.
- apiGroups: ["storage.k8s.io"]
resources: ["csistoragecapacities"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
# The GET permissions below are needed for walking up the ownership chain
# for CSIStorageCapacity. They are sufficient for deployment via
# StatefulSet (only needs to get Pod) and Deployment (needs to get
# Pod and then ReplicaSet to find the Deployment).
- apiGroups: [""]
resources: ["pods"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["replicasets"]
verbs: ["get"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["get"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: synology-csi-provisioner-role-cfg
namespace: synology-csi
subjects:
- kind: ServiceAccount
name: csi-controller-sa
namespace: synology-csi
roleRef:
kind: Role
name: synology-provisioner-cfg
apiGroup: rbac.authorization.k8s.io
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: synology-csi-controller
namespace: synology-csi
spec:
serviceName: "synology-csi-controller"
replicas: 1
selector:
matchLabels:
app: synology-csi-controller
template:
metadata:
labels:
app: synology-csi-controller
spec:
serviceAccountName: csi-controller-sa
hostNetwork: true
containers:
- name: csi-provisioner
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
args:
- --timeout=60s
- --csi-address=$(ADDRESS)
- --leader-election
# - --enable-capacity
- --http-endpoint=:8080
- --v=5
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- containerPort: 8080
name: prov-port
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: prov-port
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
- name: csi-attacher
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
args:
- --v=5
- --csi-address=$(ADDRESS)
- --leader-election
- --http-endpoint=:8081
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
- name: MY_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- containerPort: 8081
name: attach-port
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: attach-port
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
- name: csi-resizer
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0
args:
- --v=5
- --csi-address=$(ADDRESS)
- --leader-election
- --http-endpoint=:8082
env:
- name: ADDRESS
value: /var/lib/csi/sockets/pluginproxy/csi.sock
imagePullPolicy: Always
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
ports:
- containerPort: 8082
name: resizer-port
protocol: TCP
livenessProbe:
failureThreshold: 1
httpGet:
path: /healthz/leader-election
port: resizer-port
initialDelaySeconds: 10
timeoutSeconds: 10
periodSeconds: 20
- name: csi-plugin
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
image: cristicalin/synology-csi:v1.1.0
args:
- --nodeid=$(KUBE_NODE_NAME)
- --endpoint=$(CSI_ENDPOINT)
- --client-info
- /etc/synology/client-info.yml
- --log-level=info
env:
- name: CSI_ENDPOINT
value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
imagePullPolicy: IfNotPresent
volumeMounts:
- name: socket-dir
mountPath: /var/lib/csi/sockets/pluginproxy/
- name: client-info
mountPath: /etc/synology
readOnly: true
volumes:
- name: socket-dir
emptyDir: {}
- name: client-info
secret:
secretName: client-info-secret

View File

@@ -0,0 +1,11 @@
# CSIDriver object registering the Synology SAN driver with Kubernetes.
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: csi.san.synology.com
spec:
  attachRequired: true # Indicates the driver requires an attach operation (TODO: ControllerPublishVolume should be implemented)
  podInfoOnMount: true
  # fsGroupPolicy: File (see https://kubernetes-csi.github.io/docs/support-fsgroup.html#supported-modes)
  fsGroupPolicy: ReadWriteOnceWithFSType
  volumeLifecycleModes:
    - Persistent

View File

@@ -0,0 +1,4 @@
# Dedicated namespace for all Synology CSI components.
apiVersion: v1
kind: Namespace
metadata:
  name: synology-csi

View File

@@ -0,0 +1,150 @@
# Service account used by the node DaemonSet pods.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-node-sa
  namespace: synology-csi
---
# Cluster-wide permissions required by the node plugin and registrar.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: synology-csi-node-role
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "update"]
  - apiGroups: [""]
    resources: ["namespaces"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # ClusterRoleBindings are cluster-scoped; the 'namespace' field previously
  # set here was ignored by the API server and has been removed.
  name: synology-csi-node-role
subjects:
  - kind: ServiceAccount
    name: csi-node-sa
    namespace: synology-csi
roleRef:
  kind: ClusterRole
  name: synology-csi-node-role
  apiGroup: rbac.authorization.k8s.io
---
# Node DaemonSet: runs the registrar sidecar plus the Synology CSI plugin on
# every node. hostNetwork and privileged access are required for iSCSI login
# and bidirectional mount propagation into kubelet's volume directories.
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: synology-csi-node
  namespace: synology-csi
spec:
  selector:
    matchLabels:
      app: synology-csi-node
  template:
    metadata:
      labels:
        app: synology-csi-node
    spec:
      # 'serviceAccountName' replaces the deprecated 'serviceAccount' alias.
      serviceAccountName: csi-node-sa
      hostNetwork: true
      containers:
        - name: csi-driver-registrar
          securityContext:
            privileged: true
          imagePullPolicy: Always
          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.1
          # image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.5.0
          args:
            - --v=5
            - --csi-address=$(ADDRESS) # the csi socket path inside the pod
            - --kubelet-registration-path=$(REGISTRATION_PATH) # the csi socket path on the host node
            - --health-port=9809
          env:
            - name: ADDRESS
              value: /csi/csi.sock
            - name: REGISTRATION_PATH
              value: /var/lib/kubelet/plugins/csi.san.synology.com/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: plugin-dir
              mountPath: /csi
            - name: registration-dir
              mountPath: /registration
          ports:
            - containerPort: 9809
              name: healthz
          livenessProbe:
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
        - name: csi-plugin
          securityContext:
            privileged: true
          imagePullPolicy: IfNotPresent
          image: cristicalin/synology-csi:v1.1.0
          args:
            - --nodeid=$(KUBE_NODE_NAME)
            - --endpoint=$(CSI_ENDPOINT)
            - --client-info
            - /etc/synology/client-info.yml
            - --log-level=info
          env:
            - name: CSI_ENDPOINT
              value: unix://csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          volumeMounts:
            - name: kubelet-dir
              mountPath: /var/lib/kubelet
              mountPropagation: "Bidirectional"
            - name: plugin-dir
              mountPath: /csi
            - name: client-info
              mountPath: /etc/synology
              readOnly: true
            - name: host-root
              mountPath: /host
            - name: device-dir
              mountPath: /dev
      volumes:
        - name: kubelet-dir
          hostPath:
            path: /var/lib/kubelet
            type: Directory
        - name: plugin-dir
          hostPath:
            path: /var/lib/kubelet/plugins/csi.san.synology.com/
            type: DirectoryOrCreate
        - name: registration-dir
          hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
        - name: client-info
          secret:
            secretName: client-info-secret
        - name: host-root
          hostPath:
            path: /
            type: Directory
        - name: device-dir
          hostPath:
            path: /dev
            type: Directory

View File

@@ -0,0 +1,157 @@
# Service account used by the external-snapshotter StatefulSet.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-snapshotter-sa
  namespace: synology-csi
---
# Cluster-wide permissions for the csi-snapshotter sidecar.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: synology-csi-snapshotter-role
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # ClusterRoleBindings are cluster-scoped; the 'namespace' field previously
  # set here was ignored by the API server and has been removed.
  name: synology-csi-snapshotter-role
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter-sa
    namespace: synology-csi
roleRef:
  kind: ClusterRole
  name: synology-csi-snapshotter-role
  apiGroup: rbac.authorization.k8s.io
---
# Namespaced lease permissions for snapshotter leader election.
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: synology-csi
  name: synology-csi-snapshotter-cfg
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: synology-csi
  name: synology-csi-snapshotter-role-cfg
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter-sa
    namespace: synology-csi
roleRef:
  kind: Role
  name: synology-csi-snapshotter-cfg
  apiGroup: rbac.authorization.k8s.io
---
# Snapshot controller StatefulSet: csi-snapshotter sidecar (with leader
# election, liveness-probed on its http-endpoint) next to the Synology CSI
# plugin, communicating over a shared UNIX socket in an emptyDir.
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: synology-csi-snapshotter
  namespace: synology-csi
spec:
  serviceName: "synology-csi-snapshotter"
  replicas: 1
  selector:
    matchLabels:
      app: synology-csi-snapshotter
  template:
    metadata:
      labels:
        app: synology-csi-snapshotter
    spec:
      serviceAccountName: csi-snapshotter-sa
      hostNetwork: true
      containers:
        - name: csi-snapshotter
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: k8s.gcr.io/sig-storage/csi-snapshotter:v5.0.1
          args:
            - --v=5
            - --csi-address=$(ADDRESS)
            - --leader-election
            - --http-endpoint=:8083
          env:
            - name: ADDRESS
              value: /var/lib/csi/sockets/pluginproxy/csi.sock
          imagePullPolicy: Always
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
          ports:
            - containerPort: 8083
              name: snap-port
              protocol: TCP
          livenessProbe:
            failureThreshold: 1
            httpGet:
              path: /healthz/leader-election
              port: snap-port
            initialDelaySeconds: 10
            timeoutSeconds: 10
            periodSeconds: 20
        - name: csi-plugin
          securityContext:
            privileged: true
            capabilities:
              add: ["SYS_ADMIN"]
            allowPrivilegeEscalation: true
          image: cristicalin/synology-csi:v1.1.0
          args:
            - --nodeid=$(KUBE_NODE_NAME)
            - --endpoint=$(CSI_ENDPOINT)
            - --client-info
            - /etc/synology/client-info.yml
            - --log-level=info
          env:
            - name: CSI_ENDPOINT
              value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: socket-dir
              mountPath: /var/lib/csi/sockets/pluginproxy/
            - name: client-info
              mountPath: /etc/synology
              readOnly: true
      volumes:
        - name: socket-dir
          emptyDir: {}
        - name: client-info
          secret:
            secretName: client-info-secret

View File

@@ -0,0 +1,13 @@
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  name: synology-snapshotclass
  annotations:
    # NOTE(review): for VolumeSnapshotClass the default-class annotation is
    # snapshot.storage.kubernetes.io/is-default-class; this storageclass.* key
    # is kept as-is (it is set to "false", so it has no effect either way).
    storageclass.kubernetes.io/is-default-class: "false"
  labels:
    velero.io/csi-volumesnapshot-class: "true"
driver: csi.san.synology.com
deletionPolicy: Retain
# parameters:
#   description: 'Kubernetes CSI'
#   is_locked: 'false'

View File

@@ -0,0 +1,14 @@
# Default StorageClass backed by the Synology CSI driver.
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: synology-iscsi-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: csi.san.synology.com
# if all params are empty, synology CSI will choose an available location to create volume
# parameters:
#   dsm: '1.1.1.1'
#   location: '/volume1'
#   fsType: 'ext4'
reclaimPolicy: Retain
allowVolumeExpansion: true

1
go.mod
View File

@@ -17,6 +17,7 @@ require (
golang.org/x/text v0.3.7 // indirect
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af // indirect
google.golang.org/grpc v1.40.0
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/yaml.v2 v2.4.0
k8s.io/klog/v2 v2.20.0 // indirect
k8s.io/mount-utils v0.22.1

View File

@@ -19,15 +19,17 @@ package driver
import (
"context"
"fmt"
log "github.com/sirupsen/logrus"
"time"
"sort"
"strconv"
"strings"
"github.com/golang/protobuf/ptypes"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/container-storage-interface/spec/lib/go/csi"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
"github.com/SynologyOpenSource/synology-csi/pkg/models"
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
@@ -118,36 +120,56 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
isThin = utils.StringToBoolean(params["thin_provisioning"])
}
protocol := strings.ToLower(params["protocol"])
if protocol == "" {
protocol = utils.ProtocolDefault
} else if !isProtocolSupport(protocol) {
return nil, status.Error(codes.InvalidArgument, "Unsupported volume protocol")
}
spec := &models.CreateK8sVolumeSpec{
DsmIp: params["dsm"],
K8sVolumeName: volName,
LunName: fmt.Sprintf("%s-%s", models.LunPrefix, volName),
LunName: models.GenLunName(volName),
ShareName: models.GenShareName(volName),
Location: params["location"],
Size: sizeInByte,
Type: params["type"],
ThinProvisioning: isThin,
TargetName: fmt.Sprintf("%s-%s", models.LunPrefix, volName),
TargetName: fmt.Sprintf("%s-%s", models.TargetPrefix, volName),
MultipleSession: multiSession,
SourceSnapshotId: srcSnapshotId,
SourceVolumeId: srcVolumeId,
Protocol: protocol,
}
lunInfo, dsmIp, err := cs.dsmService.CreateVolume(spec)
if err != nil {
return nil, err
// idempotency
// Note: an SMB PV may not be tested existed precisely because the share folder name was sliced from k8sVolumeName
k8sVolume := cs.dsmService.GetVolumeByName(volName)
if k8sVolume == nil {
k8sVolume, err = cs.dsmService.CreateVolume(spec)
if err != nil {
return nil, err
}
} else {
// already existed
log.Debugf("Volume [%s] already exists in [%s], backing name: [%s]", volName, k8sVolume.DsmIp, k8sVolume.Name)
}
if int64(lunInfo.Size) != sizeInByte {
if (k8sVolume.Protocol == utils.ProtocolIscsi && k8sVolume.SizeInBytes != sizeInByte) ||
(k8sVolume.Protocol == utils.ProtocolSmb && utils.BytesToMB(k8sVolume.SizeInBytes) != utils.BytesToMBCeil(sizeInByte)) {
return nil , status.Errorf(codes.AlreadyExists, "Already existing volume name with different capacity")
}
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
VolumeId: lunInfo.Uuid,
CapacityBytes: int64(lunInfo.Size),
VolumeId: k8sVolume.VolumeId,
CapacityBytes: k8sVolume.SizeInBytes,
ContentSource: volContentSrc,
VolumeContext: map[string]string{
"dsm": dsmIp,
"dsm": k8sVolume.DsmIp,
"protocol": k8sVolume.Protocol,
"source": k8sVolume.Source,
},
},
}, nil
@@ -212,9 +234,11 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
pagingSkip := ("" != startingToken)
infos := cs.dsmService.ListVolumes()
sort.Sort(models.ByVolumeId(infos))
var count int32 = 0
for _, info := range infos {
if info.Lun.Uuid == startingToken {
if info.VolumeId == startingToken {
pagingSkip = false
}
@@ -223,18 +247,20 @@ func (cs *controllerServer) ListVolumes(ctx context.Context, req *csi.ListVolume
}
if maxEntries > 0 && count >= maxEntries {
nextToken = info.Lun.Uuid
nextToken = info.VolumeId
break
}
entries = append(entries, &csi.ListVolumesResponse_Entry{
Volume: &csi.Volume{
VolumeId: info.Lun.Uuid,
CapacityBytes: int64(info.Lun.Size),
VolumeId: info.VolumeId,
CapacityBytes: info.SizeInBytes,
VolumeContext: map[string]string{
"dsm": info.DsmIp,
"lunName": info.Lun.Name,
"targetIqn": info.Target.Iqn,
"shareName": info.Share.Name,
"protocol": info.Protocol,
},
},
})
@@ -290,7 +316,7 @@ func (cs *controllerServer) ControllerGetCapabilities(ctx context.Context, req *
func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateSnapshotRequest) (*csi.CreateSnapshotResponse, error) {
srcVolId := req.GetSourceVolumeId()
snapshotName := req.GetName()
snapshotName := req.GetName() // snapshot-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
params := req.GetParameters()
if srcVolId == "" {
@@ -301,37 +327,28 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
return nil, status.Error(codes.InvalidArgument, "Snapshot name is empty.")
}
snapshotInfos, err := cs.dsmService.ListAllSnapshots()
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ListAllSnapshots(), err: %v", err))
}
// idempotency
for _, snapshotInfo := range snapshotInfos {
if snapshotInfo.Name == snapshotName {
if snapshotInfo.ParentUuid != srcVolId {
return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Snapshot [%s] already exists but volume id is incompatible", snapshotName))
}
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
}
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: snapshotInfo.TotalSize,
SnapshotId: snapshotInfo.Uuid,
SourceVolumeId: snapshotInfo.ParentUuid,
CreationTime: createTime,
ReadyToUse: (snapshotInfo.Status == "Healthy"),
},
}, nil
orgSnap := cs.dsmService.GetSnapshotByName(snapshotName)
if orgSnap != nil {
// already existed
if orgSnap.ParentUuid != srcVolId {
return nil, status.Errorf(codes.AlreadyExists, fmt.Sprintf("Snapshot [%s] already exists but volume id is incompatible", snapshotName))
}
if orgSnap.CreateTime < 0 {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Bad create time: %v", orgSnap.CreateTime))
}
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: orgSnap.SizeInBytes,
SnapshotId: orgSnap.Uuid,
SourceVolumeId: orgSnap.ParentUuid,
CreationTime: timestamppb.New(time.Unix(orgSnap.CreateTime, 0)),
ReadyToUse: (orgSnap.Status == "Healthy"),
},
}, nil
}
// not exist, going to create a new snapshot
spec := &models.CreateK8sVolumeSnapshotSpec{
K8sVolumeId: srcVolId,
SnapshotName: snapshotName,
@@ -340,35 +357,19 @@ func (cs *controllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
IsLocked: utils.StringToBoolean(params["is_locked"]),
}
snapshotId, err := cs.dsmService.CreateSnapshot(spec)
snapshot, err := cs.dsmService.CreateSnapshot(spec)
if err != nil {
if err == utils.OutOfFreeSpaceError("") || err == utils.SnapshotReachMaxCountError("") {
return nil,status.Errorf(codes.ResourceExhausted, fmt.Sprintf("Failed to CreateSnapshot(%s), err: %v", srcVolId, err))
} else {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to CreateSnapshot(%s), err: %v", srcVolId, err))
}
}
snapshotInfo, err := cs.dsmService.GetSnapshot(snapshotId)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to GetSnapshot(%s), err: %v", snapshotId, err))
}
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
log.Errorf("Failed to CreateSnapshot, snapshotName: %s, srcVolId: %s, err: %v", snapshotName, srcVolId, err)
return nil, err
}
return &csi.CreateSnapshotResponse{
Snapshot: &csi.Snapshot{
SizeBytes: snapshotInfo.TotalSize,
SnapshotId: snapshotInfo.Uuid,
SourceVolumeId: snapshotInfo.ParentUuid,
CreationTime: createTime,
ReadyToUse: (snapshotInfo.Status == "Healthy"),
SizeBytes: snapshot.SizeInBytes,
SnapshotId: snapshot.Uuid,
SourceVolumeId: snapshot.ParentUuid,
CreationTime: timestamppb.New(time.Unix(snapshot.CreateTime, 0)),
ReadyToUse: (snapshot.Status == "Healthy"),
},
}, nil
}
@@ -402,22 +403,19 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
}
pagingSkip := ("" != startingToken)
var snapshotInfos []webapi.SnapshotInfo
var err error
var snapshots []*models.K8sSnapshotRespSpec
if (srcVolId != "") {
snapshotInfos, err = cs.dsmService.ListSnapshots(srcVolId)
snapshots = cs.dsmService.ListSnapshots(srcVolId)
} else {
snapshotInfos, err = cs.dsmService.ListAllSnapshots()
snapshots = cs.dsmService.ListAllSnapshots()
}
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ListSnapshots(%s), err: %v", srcVolId, err))
}
sort.Sort(models.BySnapshotAndParentUuid(snapshots))
var count int32 = 0
for _, snapshotInfo := range snapshotInfos {
if snapshotInfo.Uuid == startingToken {
for _, snapshot := range snapshots {
if snapshot.Uuid == startingToken {
pagingSkip = false
}
@@ -425,28 +423,21 @@ func (cs *controllerServer) ListSnapshots(ctx context.Context, req *csi.ListSnap
continue
}
if snapshotId != "" && snapshotInfo.Uuid != snapshotId {
if snapshotId != "" && snapshot.Uuid != snapshotId {
continue
}
if maxEntries > 0 && count >= maxEntries {
nextToken = snapshotInfo.Uuid
nextToken = snapshot.Uuid
break
}
createTime, err := ptypes.TimestampProto(time.Unix(snapshotInfo.CreateTime, 0))
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to convert create time, err: %v", err))
}
entries = append(entries, &csi.ListSnapshotsResponse_Entry{
Snapshot: &csi.Snapshot{
SizeBytes: snapshotInfo.TotalSize,
SnapshotId: snapshotInfo.Uuid,
SourceVolumeId: snapshotInfo.ParentUuid,
CreationTime: createTime,
ReadyToUse: (snapshotInfo.Status == "Healthy"),
SizeBytes: snapshot.SizeInBytes,
SnapshotId: snapshot.Uuid,
SourceVolumeId: snapshot.ParentUuid,
CreationTime: timestamppb.New(time.Unix(snapshot.CreateTime, 0)),
ReadyToUse: (snapshot.Status == "Healthy"),
},
})
@@ -477,14 +468,14 @@ func (cs *controllerServer) ControllerExpandVolume(ctx context.Context, req *csi
"InvalidArgument: Please check CapacityRange[%v]", capRange)
}
if err := cs.dsmService.ExpandLun(volumeId, sizeInByte); err != nil {
return nil, status.Error(codes.Internal,
fmt.Sprintf("Failed to expand volume [%s], err: %v", volumeId, err))
k8sVolume, err := cs.dsmService.ExpandVolume(volumeId, sizeInByte)
if err != nil {
return nil, err
}
return &csi.ControllerExpandVolumeResponse{
CapacityBytes: sizeInByte,
NodeExpansionRequired: true,
CapacityBytes: k8sVolume.SizeInBytes,
NodeExpansionRequired: (k8sVolume.Protocol == utils.ProtocolIscsi),
}, nil
}

View File

@@ -20,11 +20,16 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
log "github.com/sirupsen/logrus"
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
)
const (
DriverName = "csi.san.synology.com" // CSI dirver name
DriverVersion = "1.0.0"
DriverVersion = "1.1.0"
)
var (
supportedProtocolList = []string{utils.ProtocolIscsi, utils.ProtocolSmb}
)
type IDriver interface {
@@ -73,6 +78,8 @@ func NewControllerAndNodeDriver(nodeID string, endpoint string, dsmService inter
d.addNodeServiceCapabilities([]csi.NodeServiceCapability_RPC_Type{
csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
csi.NodeServiceCapability_RPC_EXPAND_VOLUME,
csi.NodeServiceCapability_RPC_VOLUME_MOUNT_GROUP,
// csi.NodeServiceCapability_RPC_GET_VOLUME_STATS, //TODO
})
log.Infof("New driver created: name=%s, nodeID=%s, version=%s, endpoint=%s", d.name, d.nodeID, d.version, d.endpoint)
@@ -127,3 +134,7 @@ func (d *Driver) addNodeServiceCapabilities(nsc []csi.NodeServiceCapability_RPC_
func (d *Driver) getVolumeCapabilityAccessModes() []*csi.VolumeCapability_AccessMode { // for debugging
return d.vCap
}
func isProtocolSupport(protocol string) bool {
return utils.SliceContains(supportedProtocolList, protocol)
}

View File

@@ -23,19 +23,22 @@ import (
"strings"
"time"
"github.com/cenkalti/backoff/v4"
"github.com/container-storage-interface/spec/lib/go/csi"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/mount-utils"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
"github.com/SynologyOpenSource/synology-csi/pkg/interfaces"
"github.com/SynologyOpenSource/synology-csi/pkg/models"
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
)
type nodeServer struct {
Driver *Driver
Mounter *mount.SafeFormatAndMount
Driver *Driver
Mounter *mount.SafeFormatAndMount
dsmService interfaces.IDsmService
Initiator *initiatorDriver
}
@@ -122,7 +125,7 @@ func createTargetMountPath(mounter mount.Interface, mountPath string, isBlock bo
}
func (ns *nodeServer) loginTarget(volumeId string) error {
k8sVolume := ns.dsmService.GetVolume(volumeId);
k8sVolume := ns.dsmService.GetVolume(volumeId)
if k8sVolume == nil {
return status.Error(codes.NotFound, fmt.Sprintf("Volume[%s] is not found", volumeId))
@@ -139,13 +142,182 @@ func (ns *nodeServer) loginTarget(volumeId string) error {
func (ns *nodeServer) logoutTarget(volumeId string) {
k8sVolume := ns.dsmService.GetVolume(volumeId)
if k8sVolume == nil {
if k8sVolume == nil || k8sVolume.Protocol != utils.ProtocolIscsi {
return
}
ns.Initiator.logout(k8sVolume.Target.Iqn, k8sVolume.DsmIp)
}
// checkGidPresentInMountFlags reports whether any mount flag carries a gid
// setting. When a "gid=<value>" flag is present and volumeMountGroup is
// non-empty, the two values must match case-insensitively; a mismatch is
// returned as an InvalidArgument error.
func checkGidPresentInMountFlags(volumeMountGroup string, mountFlags []string) (bool, error) {
	found := false
	for _, flag := range mountFlags {
		if !strings.HasPrefix(flag, "gid") {
			continue
		}
		found = true
		parts := strings.Split(flag, "=")
		if volumeMountGroup == "" || len(parts) != 2 {
			continue
		}
		if !strings.EqualFold(volumeMountGroup, parts[1]) {
			return false, status.Error(codes.InvalidArgument, fmt.Sprintf("gid(%s) in storageClass and pod fsgroup(%s) are not equal", parts[1], volumeMountGroup))
		}
	}
	return found, nil
}
// mountSensitiveWithRetry mounts sourcePath on targetPath via MountSensitive,
// retrying with exponential backoff (1s initial interval, x2 multiplier,
// ~10% jitter) for at most 5 seconds. sensitiveOptions are passed through to
// MountSensitive so credentials are kept out of the logs.
//
// Cleanups vs. the original: the retry closure returns the MountSensitive
// error directly instead of wrapping it in an if/return-nil block, and the
// redundant float64() conversions are dropped (Duration.Seconds already
// returns float64). Behavior is unchanged.
func (ns *nodeServer) mountSensitiveWithRetry(sourcePath string, targetPath string, fsType string, options []string, sensitiveOptions []string) error {
	mountBackoff := backoff.NewExponentialBackOff()
	mountBackoff.InitialInterval = 1 * time.Second
	mountBackoff.Multiplier = 2
	mountBackoff.RandomizationFactor = 0.1
	mountBackoff.MaxElapsedTime = 5 * time.Second

	tryMount := func() error {
		return ns.Mounter.MountSensitive(sourcePath, targetPath, fsType, options, sensitiveOptions)
	}
	mountNotify := func(err error, duration time.Duration) {
		log.Infof("Retry MountSensitive, waiting %3.2f seconds .....", duration.Seconds())
	}

	if err := backoff.RetryNotify(tryMount, mountBackoff, mountNotify); err != nil {
		log.Errorf("Could not finish mount after %3.2f seconds.", mountBackoff.MaxElapsedTime.Seconds())
		return err
	}

	log.Debugf("Mount successfully. source: %s, target: %s", sourcePath, targetPath)
	return nil
}
// setSMBVolumePermission grants userName the given access level (read-write,
// read-only or no-access) on the DSM share referenced by sourcePath, which
// must have the form "//<dsmIp>/<shareName>".
func (ns *nodeServer) setSMBVolumePermission(sourcePath string, userName string, authType utils.AuthType) error {
	parts := strings.Split(strings.TrimPrefix(sourcePath, "//"), "/")
	if len(parts) != 2 {
		return fmt.Errorf("Failed to parse dsmIp and shareName from source path")
	}
	dsmIp, shareName := parts[0], parts[1]

	dsm, err := ns.dsmService.GetDsm(dsmIp)
	if err != nil {
		return fmt.Errorf("Failed to get DSM[%s]", dsmIp)
	}

	// Translate the abstract auth type into the single DSM permission flag
	// it corresponds to; unknown types are rejected.
	permission := webapi.SharePermission{
		Name: userName,
	}
	switch authType {
	case utils.AuthTypeReadWrite:
		permission.IsWritable = true
	case utils.AuthTypeReadOnly:
		permission.IsReadonly = true
	case utils.AuthTypeNoAccess:
		permission.IsDeny = true
	default:
		return fmt.Errorf("Unknown auth type: %s", string(authType))
	}

	spec := webapi.SharePermissionSetSpec{
		Name:          shareName,
		UserGroupType: models.UserGroupTypeLocalUser,
		Permissions:   []*webapi.SharePermission{&permission},
	}
	return dsm.SharePermissionSet(spec)
}
// nodeStageISCSIVolume logs in to the iSCSI target backing the volume and
// format-and-mounts the resulting device on the staging path. Raw block
// volumes skip staging entirely, and an already-mounted staging path is
// treated as success (idempotent staging).
func (ns *nodeServer) nodeStageISCSIVolume(ctx context.Context, spec *models.NodeStageVolumeSpec) (*csi.NodeStageVolumeResponse, error) {
	// Raw block volumes are published directly; nothing to mount here.
	if spec.VolumeCapability.GetBlock() != nil {
		return &csi.NodeStageVolumeResponse{}, nil
	}

	if err := ns.loginTarget(spec.VolumeId); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	devicePath := ns.getVolumeMountPath(spec.VolumeId)
	if devicePath == "" {
		return nil, status.Error(codes.Internal, "Can't get volume mount path")
	}

	notMount, err := ns.Mounter.Interface.IsLikelyNotMountPoint(spec.StagingTargetPath)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if !notMount {
		// Already staged; idempotent success.
		return &csi.NodeStageVolumeResponse{}, nil
	}

	mountOptions := append([]string{"rw"}, spec.VolumeCapability.GetMount().GetMountFlags()...)
	if err := ns.Mounter.FormatAndMount(devicePath, spec.StagingTargetPath, spec.VolumeCapability.GetMount().GetFsType(), mountOptions); err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}

	return &csi.NodeStageVolumeResponse{}, nil
}
// nodeStageSMBVolume stages an SMB-backed volume: it grants the secret's user
// read-write access to the DSM share, then CIFS-mounts spec.Source on the
// staging path. Credentials travel via sensitiveOptions so they are never
// logged. Fix vs. the original: constant error messages are passed to
// status.Error directly instead of through a redundant fmt.Sprintf with no
// formatting directives (staticcheck S1039); the strings are unchanged.
func (ns *nodeServer) nodeStageSMBVolume(ctx context.Context, spec *models.NodeStageVolumeSpec, secrets map[string]string) (*csi.NodeStageVolumeResponse, error) {
	// SMB shares cannot be exposed as raw block devices.
	if spec.VolumeCapability.GetBlock() != nil {
		return nil, status.Error(codes.InvalidArgument, "SMB protocol only allows 'mount' access type")
	}
	if spec.Source == "" { //"//<host>/<shareName>"
		return nil, status.Error(codes.InvalidArgument, "Missing 'source' field")
	}
	if secrets == nil {
		return nil, status.Error(codes.InvalidArgument, "Missing secrets for node staging volume")
	}

	username := strings.TrimSpace(secrets["username"])
	password := strings.TrimSpace(secrets["password"])
	domain := strings.TrimSpace(secrets["domain"])

	// set permission to access the share
	if err := ns.setSMBVolumePermission(spec.Source, username, utils.AuthTypeReadWrite); err != nil {
		return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to set permission, source: %s, err: %v", spec.Source, err))
	}

	// create mount point if not exists
	targetPath := spec.StagingTargetPath
	notMount, err := createTargetMountPath(ns.Mounter.Interface, targetPath, false)
	if err != nil {
		return nil, status.Error(codes.Internal, err.Error())
	}
	if !notMount {
		log.Infof("NodeStageVolume: %s is already mounted", targetPath)
		return &csi.NodeStageVolumeResponse{}, nil // already mount
	}

	fsType := "cifs"
	options := spec.VolumeCapability.GetMount().GetMountFlags()
	volumeMountGroup := spec.VolumeCapability.GetMount().GetVolumeMountGroup()
	gidPresent, err := checkGidPresentInMountFlags(volumeMountGroup, options)
	if err != nil {
		return nil, err
	}
	if !gidPresent && volumeMountGroup != "" {
		options = append(options, fmt.Sprintf("gid=%s", volumeMountGroup))
	}
	if domain != "" {
		options = append(options, fmt.Sprintf("%s=%s", "domain", domain))
	}

	// Credentials go in sensitiveOptions so MountSensitive keeps them out of logs.
	sensitiveOptions := []string{fmt.Sprintf("%s=%s,%s=%s", "username", username, "password", password)}
	if err := ns.mountSensitiveWithRetry(spec.Source, targetPath, fsType, options, sensitiveOptions); err != nil {
		return nil, status.Error(codes.Internal,
			fmt.Sprintf("Volume[%s] failed to mount %q on %q. err: %v", spec.VolumeId, spec.Source, targetPath, err))
	}

	return &csi.NodeStageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
volumeId, stagingTargetPath, volumeCapability :=
req.GetVolumeId(), req.GetStagingTargetPath(), req.GetVolumeCapability()
@@ -155,47 +327,32 @@ func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVol
"InvalidArgument: Please check volume ID, staging target path and volume capability.")
}
// if block mode, skip mount
if volumeCapability.GetBlock() != nil {
return &csi.NodeStageVolumeResponse{}, nil
if volumeCapability.GetBlock() != nil && volumeCapability.GetMount() != nil {
return nil, status.Error(codes.InvalidArgument, "Cannot mix block and mount capabilities")
}
if err := ns.loginTarget(volumeId); err != nil {
return nil, status.Error(codes.Internal, err.Error())
spec := &models.NodeStageVolumeSpec{
VolumeId: volumeId,
StagingTargetPath: stagingTargetPath,
VolumeCapability: volumeCapability,
Dsm: req.VolumeContext["dsm"],
Source: req.VolumeContext["source"], // filled by CreateVolume response
}
volumeMountPath := ns.getVolumeMountPath(volumeId)
if volumeMountPath == "" {
return nil, status.Error(codes.Internal, "Can't get volume mount path")
switch req.VolumeContext["protocol"] {
case utils.ProtocolSmb:
return ns.nodeStageSMBVolume(ctx, spec, req.GetSecrets())
default:
return ns.nodeStageISCSIVolume(ctx, spec)
}
notMount, err := ns.Mounter.Interface.IsLikelyNotMountPoint(stagingTargetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if !notMount {
return &csi.NodeStageVolumeResponse{}, nil
}
fsType := volumeCapability.GetMount().GetFsType()
mountFlags := volumeCapability.GetMount().GetMountFlags()
options := append([]string{"rw"}, mountFlags...)
if err = ns.Mounter.FormatAndMount(volumeMountPath, stagingTargetPath, fsType, options); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
return &csi.NodeStageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
if req.GetVolumeId() == "" { // Useless, just for sanity check
volumeID, stagingTargetPath := req.GetVolumeId(), req.GetStagingTargetPath()
if volumeID == "" {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
stagingTargetPath := req.GetStagingTargetPath()
if stagingTargetPath == "" {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
@@ -211,18 +368,26 @@ func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
}
}
ns.logoutTarget(volumeID)
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
volumeId, targetPath, stagingTargetPath := req.GetVolumeId(), req.GetTargetPath(), req.GetStagingTargetPath()
isBlock := req.GetVolumeCapability().GetBlock() != nil
if volumeId == "" || targetPath == "" || stagingTargetPath == "" {
return nil, status.Error(codes.InvalidArgument,
"InvalidArgument: Please check volume ID, target path and staging target path.")
}
if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
}
isBlock := req.GetVolumeCapability().GetBlock() != nil // raw block, only for iscsi protocol
fsType := req.GetVolumeCapability().GetMount().GetFsType()
notMount, err := createTargetMountPath(ns.Mounter.Interface, targetPath, isBlock)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
@@ -231,45 +396,51 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
return &csi.NodePublishVolumeResponse{}, nil
}
if err := ns.loginTarget(volumeId); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
volumeMountPath := ns.getVolumeMountPath(volumeId)
if volumeMountPath == "" {
return nil, status.Error(codes.Internal, "Can't get volume mount path")
}
options := []string{"bind"}
if req.GetReadonly() {
options = append(options, "ro")
}
if isBlock {
err = ns.Mounter.Interface.Mount(volumeMountPath, targetPath, "", options)
} else {
fsType := req.GetVolumeCapability().GetMount().GetFsType()
err = ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, fsType, options)
}
switch req.VolumeContext["protocol"] {
case utils.ProtocolSmb:
if err := ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, "", options); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
default:
if err := ns.loginTarget(volumeId); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
volumeMountPath := ns.getVolumeMountPath(volumeId)
if volumeMountPath == "" {
return nil, status.Error(codes.Internal, "Can't get volume mount path")
}
if isBlock {
err = ns.Mounter.Interface.Mount(volumeMountPath, targetPath, "", options)
} else {
err = ns.Mounter.Interface.Mount(stagingTargetPath, targetPath, fsType, options)
}
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
return &csi.NodePublishVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
volumeId, targetPath := req.GetVolumeId(), req.GetTargetPath()
if volumeId == "" {
if req.GetVolumeId() == "" { // Not needed, but still a mandatory field
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
targetPath := req.GetTargetPath()
if targetPath == "" {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
if _, err := os.Stat(targetPath); err != nil {
if os.IsNotExist(err){
if os.IsNotExist(err) {
return &csi.NodeUnpublishVolumeResponse{}, nil
}
return nil, status.Errorf(codes.Internal, err.Error())
@@ -285,24 +456,6 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return &csi.NodeUnpublishVolumeResponse{}, nil
}
needToLogout := true
list, err := ns.Mounter.Interface.GetMountRefs(targetPath)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
for _, path := range list {
filePrefix := "/var/lib/kubelet/pods/"
blkPrefix := "/var/lib/kubelet/plugins/kubernetes.io/csi/volumeDevices/publish/"
if strings.HasPrefix(path, filePrefix) || strings.HasPrefix(path, blkPrefix) {
needToLogout = false
break
}
}
if err := ns.Mounter.Interface.Unmount(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -311,10 +464,6 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return nil, status.Errorf(codes.Internal, "Failed to remove target path.")
}
if needToLogout {
ns.logoutTarget(volumeId)
}
return &csi.NodeUnpublishVolumeResponse{}, nil
}
@@ -350,8 +499,19 @@ func (ns *nodeServer) NodeGetVolumeStats(ctx context.Context, req *csi.NodeGetVo
fmt.Sprintf("Volume[%s] does not exist on the %s", volumeId, volumePath))
}
lun := k8sVolume.Lun
if k8sVolume.Protocol == utils.ProtocolSmb {
return &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{
&csi.VolumeUsage{
Total: k8sVolume.SizeInBytes,
Unit: csi.VolumeUsage_BYTES,
},
},
}, nil
}
lun := k8sVolume.Lun
return &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{
&csi.VolumeUsage{
@@ -376,6 +536,11 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
return nil, status.Error(codes.NotFound, fmt.Sprintf("Volume[%s] is not found", volumeId))
}
if k8sVolume.Protocol == utils.ProtocolSmb {
return &csi.NodeExpandVolumeResponse{
CapacityBytes: sizeInByte}, nil
}
if err := ns.Initiator.rescan(k8sVolume.Target.Iqn); err != nil {
return nil, status.Error(codes.Internal, fmt.Sprintf("Failed to rescan. err: %v", err))
}
@@ -400,4 +565,4 @@ func (ns *nodeServer) NodeExpandVolume(ctx context.Context, req *csi.NodeExpandV
}
return &csi.NodeExpandVolumeResponse{
CapacityBytes: sizeInByte}, nil
}
}

View File

@@ -149,11 +149,11 @@ func getLunTypeByInputParams(lunType string, isThin bool, locationFsType string)
return "", fmt.Errorf("Unknown volume fs type: %s", locationFsType)
}
func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, lunUuid string) error {
func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, lunUuid string) (webapi.TargetInfo, error) {
dsmInfo, err := dsm.DsmInfoGet()
if err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s] info", dsm.Ip));
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s] info", dsm.Ip));
}
genTargetIqn := func() string {
@@ -175,35 +175,35 @@ func (service *DsmService) createMappingTarget(dsm *webapi.DSM, spec *models.Cre
targetId, err := dsm.TargetCreate(targetSpec)
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to create target with spec: %v, err: %v", targetSpec, err))
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to create target with spec: %v, err: %v", targetSpec, err))
}
if targetInfo, err := dsm.TargetGet(targetSpec.Name); err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get target with spec: %v, err: %v", targetSpec, err))
targetInfo, err := dsm.TargetGet(targetSpec.Name)
if err != nil {
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get target with spec: %v, err: %v", targetSpec, err))
} else {
targetId = strconv.Itoa(targetInfo.TargetId);
}
if spec.MultipleSession == true {
if err := dsm.TargetSet(targetId, 0); err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to set target [%s] max session, err: %v", spec.TargetName, err))
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to set target [%s] max session, err: %v", spec.TargetName, err))
}
}
err = dsm.LunMapTarget([]string{targetId}, lunUuid)
if err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to map target [%s] to lun [%s], err: %v", spec.TargetName, lunUuid, err))
if err := dsm.LunMapTarget([]string{targetId}, lunUuid); err != nil {
return webapi.TargetInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("Failed to map target [%s] to lun [%s], err: %v", spec.TargetName, lunUuid, err))
}
return nil
return targetInfo, nil
}
func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, error) {
func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
// 1. Find a available location
if spec.Location == "" {
vol, err := service.getFirstAvailableVolume(dsm, spec.Size)
if err != nil {
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get available location, err: %v", err))
}
spec.Location = vol.Path
@@ -212,13 +212,13 @@ func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.Creat
// 2. Check if location exists
dsmVolInfo, err := dsm.VolumeGet(spec.Location)
if err != nil {
return webapi.LunInfo{},
return nil,
status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unable to find location %s", spec.Location))
}
lunType, err := getLunTypeByInputParams(spec.Type, spec.ThinProvisioning, dsmVolInfo.FsType)
if err != nil {
return webapi.LunInfo{},
return nil,
status.Errorf(codes.InvalidArgument, fmt.Sprintf("Unknown volume fs type: %s, location: %s", dsmVolInfo.FsType, spec.Location))
}
@@ -234,29 +234,29 @@ func (service *DsmService) createVolumeByDsm(dsm *webapi.DSM, spec *models.Creat
_, err = dsm.LunCreate(lunSpec)
if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
return webapi.LunInfo{},
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create Volume with name: %s, err: %v", spec.K8sVolumeName, err))
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create LUN, err: %v", err))
}
// No matter lun existed or not, Get Lun by name
lunInfo, err := dsm.LunGet(spec.LunName)
if err != nil {
return webapi.LunInfo{},
return nil,
// discussion with log
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
}
// 4. Create Target and Map to Lun
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
if err != nil {
// FIXME need to delete lun and target
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
}
log.Debugf("[%s] CreateVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
log.Debugf("[%s] CreateVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
return lunInfo, nil
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
}
func waitCloneFinished(dsm *webapi.DSM, lunName string) error {
@@ -291,47 +291,47 @@ func waitCloneFinished(dsm *webapi.DSM, lunName string) error {
return nil
}
func (service *DsmService) createVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, snapshotInfo webapi.SnapshotInfo) (webapi.LunInfo, error) {
if spec.Size != 0 && spec.Size != snapshotInfo.TotalSize {
return webapi.LunInfo{}, status.Errorf(codes.OutOfRange, "Lun size [%d] is not equal to snapshot size [%d]", spec.Size, snapshotInfo.TotalSize)
func (service *DsmService) createVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcSnapshot *models.K8sSnapshotRespSpec) (*models.K8sVolumeRespSpec, error) {
if spec.Size != 0 && spec.Size != srcSnapshot.SizeInBytes {
return nil, status.Errorf(codes.OutOfRange, "Requested lun size [%d] is not equal to snapshot size [%d]", spec.Size, srcSnapshot.SizeInBytes)
}
snapshotCloneSpec := webapi.SnapshotCloneSpec{
Name: spec.LunName,
SrcLunUuid: snapshotInfo.ParentUuid,
SrcSnapshotUuid: snapshotInfo.Uuid,
SrcLunUuid: srcSnapshot.ParentUuid,
SrcSnapshotUuid: srcSnapshot.Uuid,
}
if _, err := dsm.SnapshotClone(snapshotCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
return webapi.LunInfo{},
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source snapshot ID: %s, err: %v", snapshotInfo.Uuid, err))
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source snapshot ID: %s, err: %v", srcSnapshot.Uuid, err))
}
if err := waitCloneFinished(dsm, spec.LunName); err != nil {
return webapi.LunInfo{}, status.Errorf(codes.Internal, err.Error())
return nil, status.Errorf(codes.Internal, err.Error())
}
lunInfo, err := dsm.LunGet(spec.LunName)
if err != nil {
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
}
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
if err != nil {
// FIXME need to delete lun and target
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
}
log.Debugf("[%s] createVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
log.Debugf("[%s] createVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
return lunInfo, nil
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
}
func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcLunInfo webapi.LunInfo) (webapi.LunInfo, error) {
func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcLunInfo webapi.LunInfo) (*models.K8sVolumeRespSpec, error) {
if spec.Size != 0 && spec.Size != int64(srcLunInfo.Size) {
return webapi.LunInfo{}, status.Errorf(codes.OutOfRange, "Lun size [%d] is not equal to src lun size [%d]", spec.Size, srcLunInfo.Size)
return nil, status.Errorf(codes.OutOfRange, "Requested lun size [%d] is not equal to src lun size [%d]", spec.Size, srcLunInfo.Size)
}
if spec.Location == "" {
@@ -345,154 +345,230 @@ func (service *DsmService) createVolumeByVolume(dsm *webapi.DSM, spec *models.Cr
}
if _, err := dsm.LunClone(lunCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create volume with source volume ID: %s, err: %v", srcLunInfo.Uuid, err))
}
if err := waitCloneFinished(dsm, spec.LunName); err != nil {
return webapi.LunInfo{}, status.Errorf(codes.Internal, err.Error())
return nil, status.Errorf(codes.Internal, err.Error())
}
lunInfo, err := dsm.LunGet(spec.LunName)
if err != nil {
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to get existed LUN with name: %s, err: %v", spec.LunName, err))
}
err = service.createMappingTarget(dsm, spec, lunInfo.Uuid)
targetInfo, err := service.createMappingTarget(dsm, spec, lunInfo.Uuid)
if err != nil {
// FIXME need to delete lun and target
return webapi.LunInfo{},
return nil,
status.Errorf(codes.Internal, fmt.Sprintf("Failed to create and map target, err: %v", err))
}
log.Debugf("[%s] createVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid);
log.Debugf("[%s] createVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, lunInfo.Uuid)
return lunInfo, nil
return DsmLunToK8sVolume(dsm.Ip, lunInfo, targetInfo), nil
}
func (service *DsmService) CreateVolume(spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, string, error) {
// DsmShareToK8sVolume wraps a DSM share description into the K8s volume
// response model used by the CSI layer. The resulting volume is SMB-backed:
// its id is the share UUID and its source is the UNC path //<dsmIp>/<share>.
func DsmShareToK8sVolume(dsmIp string, info webapi.ShareInfo) *models.K8sVolumeRespSpec {
	vol := models.K8sVolumeRespSpec{
		DsmIp:       dsmIp,
		VolumeId:    info.Uuid,
		SizeInBytes: utils.MBToBytes(info.QuotaValueInMB),
		Location:    info.VolPath,
		Name:        info.Name,
		Source:      "//" + dsmIp + "/" + info.Name,
		Protocol:    utils.ProtocolSmb,
		Share:       info,
	}
	return &vol
}
// DsmLunToK8sVolume wraps a DSM LUN plus its iSCSI target into the K8s
// volume response model used by the CSI layer. The volume id is the LUN
// UUID; Source is left empty for the iSCSI protocol.
func DsmLunToK8sVolume(dsmIp string, info webapi.LunInfo, targetInfo webapi.TargetInfo) *models.K8sVolumeRespSpec {
	vol := models.K8sVolumeRespSpec{
		DsmIp:       dsmIp,
		VolumeId:    info.Uuid,
		SizeInBytes: int64(info.Size),
		Location:    info.Location,
		Name:        info.Name,
		Source:      "",
		Protocol:    utils.ProtocolIscsi,
		Lun:         info,
		Target:      targetInfo,
	}
	return &vol
}
func (service *DsmService) CreateVolume(spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
if spec.SourceVolumeId != "" {
/* Create volume by exists volume (Clone) */
k8sVolume := service.GetVolume(spec.SourceVolumeId)
if k8sVolume == nil {
return webapi.LunInfo{}, "", status.Errorf(codes.NotFound, fmt.Sprintf("No such volume id: %s", spec.SourceVolumeId))
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("No such volume id: %s", spec.SourceVolumeId))
}
dsm, err := service.GetDsm(k8sVolume.DsmIp)
if err != nil {
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
}
lunInfo, err := service.createVolumeByVolume(dsm, spec, k8sVolume.Lun)
return lunInfo, dsm.Ip, err
} else if spec.SourceSnapshotId != "" {
/* Create volume by snapshot */
for _, dsm := range service.dsms {
snapshotInfo, err := dsm.SnapshotGet(spec.SourceSnapshotId)
if err != nil {
continue
}
lunInfo, err := service.createVolumeBySnapshot(dsm, spec, snapshotInfo)
return lunInfo, dsm.Ip, err
if spec.Protocol == utils.ProtocolIscsi {
return service.createVolumeByVolume(dsm, spec, k8sVolume.Lun)
} else if spec.Protocol == utils.ProtocolSmb {
return service.createSMBVolumeByVolume(dsm, spec, k8sVolume.Share)
}
return webapi.LunInfo{}, "", status.Errorf(codes.NotFound, fmt.Sprintf("No such snapshot id: %s", spec.SourceSnapshotId))
} else if spec.DsmIp != "" {
/* Create volume by specific dsm ip */
dsm, err := service.GetDsm(spec.DsmIp)
if err != nil {
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("%v", err))
}
lunInfo, err := service.createVolumeByDsm(dsm, spec)
return lunInfo, dsm.Ip, err
} else {
/* Find appropriate dsm to create volume */
for _, dsm := range service.dsms {
lunInfo, err := service.createVolumeByDsm(dsm, spec)
if err != nil {
log.Errorf("[%s] Failed to create Volume: %v", dsm.Ip, err)
continue
}
return lunInfo, dsm.Ip, nil
}
return webapi.LunInfo{}, "", status.Errorf(codes.Internal, fmt.Sprintf("Couldn't find any host available to create Volume"))
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
}
if spec.SourceSnapshotId != "" {
/* Create volume by snapshot */
snapshot := service.GetSnapshotByUuid(spec.SourceSnapshotId)
if snapshot == nil {
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("No such snapshot id: %s", spec.SourceSnapshotId))
}
// found source by snapshot id, check allowable
if spec.DsmIp != "" && spec.DsmIp != snapshot.DsmIp {
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same DSM for cloning from snapshots. Source is on %s, but new PVC is on %s",
snapshot.DsmIp, spec.DsmIp)
return nil, status.Errorf(codes.InvalidArgument, msg)
}
if spec.Location != "" && spec.Location != snapshot.RootPath {
msg := fmt.Sprintf("The source PVC and destination PVCs must be on the same location for cloning from snapshots. Source is on %s, but new PVC is on %s",
snapshot.RootPath, spec.Location)
return nil, status.Errorf(codes.InvalidArgument, msg)
}
if spec.Protocol != snapshot.Protocol {
msg := fmt.Sprintf("The source PVC and destination PVCs shouldn't have different protocols. Source is %s, but new PVC is %s",
snapshot.Protocol, spec.Protocol)
return nil, status.Errorf(codes.InvalidArgument, msg)
}
dsm, err := service.GetDsm(snapshot.DsmIp)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", snapshot.DsmIp))
}
if spec.Protocol == utils.ProtocolIscsi {
return service.createVolumeBySnapshot(dsm, spec, snapshot)
} else if spec.Protocol == utils.ProtocolSmb {
return service.createSMBVolumeBySnapshot(dsm, spec, snapshot)
}
return nil, status.Error(codes.InvalidArgument, "Unknown protocol")
}
/* Find appropriate dsm to create volume */
for _, dsm := range service.dsms {
if spec.DsmIp != "" && spec.DsmIp != dsm.Ip {
continue
}
var k8sVolume *models.K8sVolumeRespSpec
var err error
if spec.Protocol == utils.ProtocolIscsi {
k8sVolume, err = service.createVolumeByDsm(dsm, spec)
} else if spec.Protocol == utils.ProtocolSmb {
k8sVolume, err = service.createSMBVolumeByDsm(dsm, spec)
}
if err != nil {
log.Errorf("[%s] Failed to create Volume: %v", dsm.Ip, err)
continue
}
return k8sVolume, nil
}
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Couldn't find any host available to create Volume"))
}
func (service *DsmService) DeleteVolume(volumeId string) error {
k8sVolume := service.GetVolume(volumeId)
func (service *DsmService) DeleteVolume(volId string) error {
k8sVolume := service.GetVolume(volId)
if k8sVolume == nil {
log.Infof("Skip delete volume[%s] that is no exist", volumeId)
log.Infof("Skip delete volume[%s] that is no exist", volId)
return nil
}
lun, target := k8sVolume.Lun, k8sVolume.Target
dsm, err := service.GetDsm(k8sVolume.DsmIp)
if err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
}
if err := dsm.LunDelete(lun.Uuid); err != nil {
return err
}
if k8sVolume.Protocol == utils.ProtocolSmb {
if err := dsm.ShareDelete(k8sVolume.Share.Name); err != nil {
log.Errorf("[%s] Failed to delete Share(%s): %v", dsm.Ip, k8sVolume.Share.Name, err)
return err
}
} else {
lun, target := k8sVolume.Lun, k8sVolume.Target
if len(target.MappedLuns) != 1 {
log.Infof("Skip deletes target[%s] that was mapped with lun. DSM[%s]", target.Name, dsm.Ip)
return nil
}
if err := dsm.LunDelete(lun.Uuid); err != nil {
if _, err := dsm.LunGet(lun.Uuid); err != nil && errors.Is(err, utils.NoSuchLunError("")) {
return nil
}
log.Errorf("[%s] Failed to delete LUN(%s): %v", dsm.Ip, lun.Uuid, err)
return err
}
if err := dsm.TargetDelete(strconv.Itoa(target.TargetId)); err != nil {
return err
if len(target.MappedLuns) != 1 {
log.Infof("Skip deletes target[%s] that was mapped with lun. DSM[%s]", target.Name, dsm.Ip)
return nil
}
if err := dsm.TargetDelete(strconv.Itoa(target.TargetId)); err != nil {
if _, err := dsm.TargetGet(strconv.Itoa(target.TargetId)); err != nil {
return nil
}
log.Errorf("[%s] Failed to delete target(%d): %v", dsm.Ip, target.TargetId, err)
return err
}
}
return nil
}
func (service *DsmService) listVolumesByDsm(dsm *webapi.DSM, infos *[]*models.ListK8sVolumeRespSpec) {
targetInfos, err := dsm.TargetList()
if err != nil {
log.Errorf("[%s] Failed to list targets: %v", dsm.Ip, err)
}
func (service *DsmService) listISCSIVolumes(dsmIp string) (infos []*models.K8sVolumeRespSpec) {
for _, dsm := range service.dsms {
if dsmIp != "" && dsmIp != dsm.Ip {
continue
}
for _, target := range targetInfos {
// TODO: use target.ConnectedSessions to filter targets
for _, mapping := range target.MappedLuns {
lun, err := dsm.LunGet(mapping.LunUuid)
if err != nil {
log.Errorf("[%s] Failed to get LUN(%s): %v", dsm.Ip, mapping.LunUuid, err)
targetInfos, err := dsm.TargetList()
if err != nil {
log.Errorf("[%s] Failed to list targets: %v", dsm.Ip, err)
continue
}
for _, target := range targetInfos {
// TODO: use target.ConnectedSessions to filter targets
for _, mapping := range target.MappedLuns {
lun, err := dsm.LunGet(mapping.LunUuid)
if err != nil {
log.Errorf("[%s] Failed to get LUN(%s): %v", dsm.Ip, mapping.LunUuid, err)
}
if !strings.HasPrefix(lun.Name, models.LunPrefix) {
continue
}
// FIXME: filter same LUN mapping to two target
infos = append(infos, DsmLunToK8sVolume(dsm.Ip, lun, target))
}
if !strings.HasPrefix(lun.Name, models.LunPrefix) {
continue
}
*infos = append(*infos, &models.ListK8sVolumeRespSpec{
DsmIp: dsm.Ip,
Target: target,
Lun: lun,
})
}
}
return
return infos
}
func (service *DsmService) ListVolumes() []*models.ListK8sVolumeRespSpec {
var infos []*models.ListK8sVolumeRespSpec
for _, dsm := range service.dsms {
service.listVolumesByDsm(dsm, &infos)
}
// ListVolumes aggregates every CSI-managed volume — iSCSI LUNs and SMB
// shares — across all configured DSMs (empty ip filter = all DSMs).
func (service *DsmService) ListVolumes() (infos []*models.K8sVolumeRespSpec) {
	iscsiVols := service.listISCSIVolumes("")
	smbVols := service.listSMBVolumes("")
	infos = append(infos, iscsiVols...)
	infos = append(infos, smbVols...)
	return infos
}
func (service *DsmService) GetVolume(lunUuid string) *models.ListK8sVolumeRespSpec {
func (service *DsmService) GetVolume(volId string) *models.K8sVolumeRespSpec {
volumes := service.ListVolumes()
for _, volume := range volumes {
if volume.Lun.Uuid == lunUuid {
if volume.VolumeId == volId {
return volume
}
}
@@ -500,100 +576,74 @@ func (service *DsmService) GetVolume(lunUuid string) *models.ListK8sVolumeRespSp
return nil
}
func (service *DsmService) ExpandLun(lunUuid string, newSize int64) error {
k8sVolume := service.GetVolume(lunUuid);
if k8sVolume == nil {
return status.Errorf(codes.InvalidArgument, fmt.Sprintf("Can't find volume[%s].", lunUuid))
}
if int64(k8sVolume.Lun.Size) > newSize {
return status.Errorf(codes.InvalidArgument,
fmt.Sprintf("Failed to expand volume[%s], because expand size[%d] bigger than before[%d].",
lunUuid, newSize, k8sVolume.Lun.Size))
}
spec := webapi.LunUpdateSpec{
Uuid: lunUuid,
NewSize: uint64(newSize),
}
dsm, err := service.GetDsm(k8sVolume.DsmIp)
if err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
}
if err := dsm.LunUpdate(spec); err != nil {
return status.Errorf(codes.InvalidArgument,
fmt.Sprintf("Failed to expand volume[%s]. err: %v", lunUuid, err))
// GetVolumeByName finds the volume whose backing LUN name or share name
// was generated from the given K8s volume name. Returns nil when no
// volume matches.
func (service *DsmService) GetVolumeByName(volName string) *models.K8sVolumeRespSpec {
	for _, vol := range service.ListVolumes() {
		// A K8s volume name maps to either a LUN-prefixed or a
		// share-prefixed backing name depending on protocol.
		if vol.Name == models.GenLunName(volName) ||
			vol.Name == models.GenShareName(volName) {
			return vol
		}
	}
	return nil
}
func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (string, error) {
k8sVolume := service.GetVolume(spec.K8sVolumeId);
// GetSnapshotByName scans snapshots on every DSM for one with the given
// name; returns nil when none matches.
func (service *DsmService) GetSnapshotByName(snapshotName string) *models.K8sSnapshotRespSpec {
	for _, candidate := range service.ListAllSnapshots() {
		if candidate.Name == snapshotName {
			return candidate
		}
	}
	return nil
}
func (service *DsmService) ExpandVolume(volId string, newSize int64) (*models.K8sVolumeRespSpec, error) {
k8sVolume := service.GetVolume(volId);
if k8sVolume == nil {
return "", status.Errorf(codes.NotFound, fmt.Sprintf("Can't find volume[%s].", spec.K8sVolumeId))
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Can't find volume[%s].", volId))
}
snapshotSpec := webapi.SnapshotCreateSpec{
Name: spec.SnapshotName,
LunUuid: spec.K8sVolumeId,
Description: spec.Description,
TakenBy: spec.TakenBy,
IsLocked: spec.IsLocked,
if k8sVolume.SizeInBytes > newSize {
return nil, status.Errorf(codes.InvalidArgument,
fmt.Sprintf("Failed to expand volume[%s], because expand size[%d] smaller than before[%d].",
volId, newSize, k8sVolume.SizeInBytes))
}
dsm, err := service.GetDsm(k8sVolume.DsmIp)
if err != nil {
return "", status.Errorf(codes.InvalidArgument, fmt.Sprintf("Failed to get dsm: %v", err))
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to get DSM[%s]", k8sVolume.DsmIp))
}
snapshotUuid, err := dsm.SnapshotCreate(snapshotSpec)
if err != nil {
return "", err
if k8sVolume.Protocol == utils.ProtocolSmb {
newSizeInMB := utils.BytesToMBCeil(newSize) // round up to MB
if err := dsm.SetShareQuota(k8sVolume.Share, newSizeInMB); err != nil {
log.Errorf("[%s] Failed to set quota [%d (MB)] to Share [%s]: %v",
dsm.Ip, newSizeInMB, k8sVolume.Share.Name, err)
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to expand volume[%s]. err: %v", volId, err))
}
// convert MB to bytes, may be diff from the input newSize
k8sVolume.SizeInBytes = utils.MBToBytes(newSizeInMB)
} else {
spec := webapi.LunUpdateSpec{
Uuid: volId,
NewSize: uint64(newSize),
}
if err := dsm.LunUpdate(spec); err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to expand volume[%s]. err: %v", volId, err))
}
k8sVolume.SizeInBytes = newSize
}
return snapshotUuid, nil
return k8sVolume, nil
}
func (service *DsmService) DeleteSnapshot(snapshotUuid string) error {
for _, dsm := range service.dsms {
_, err := dsm.SnapshotGet(snapshotUuid)
if err != nil {
continue
}
func (service *DsmService) CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (*models.K8sSnapshotRespSpec, error) {
srcVolId := spec.K8sVolumeId
err = dsm.SnapshotDelete(snapshotUuid)
if err != nil {
return status.Errorf(codes.Internal, fmt.Sprintf("Failed to delete snapshot [%s]. err: %v", snapshotUuid, err))
}
return nil
}
return nil
}
func (service *DsmService) ListAllSnapshots() ([]webapi.SnapshotInfo, error) {
var allInfos []webapi.SnapshotInfo
for _, dsm := range service.dsms {
infos, err := dsm.SnapshotList("")
if err != nil {
continue
}
allInfos = append(allInfos, infos...)
}
return allInfos, nil
}
func (service *DsmService) ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo, error) {
k8sVolume := service.GetVolume(lunUuid);
k8sVolume := service.GetVolume(srcVolId);
if k8sVolume == nil {
return []webapi.SnapshotInfo{}, nil // return empty when the volume does not exist
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Can't find volume[%s].", srcVolId))
}
dsm, err := service.GetDsm(k8sVolume.DsmIp)
@@ -601,24 +651,201 @@ func (service *DsmService) ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo,
return nil, status.Errorf(codes.InvalidArgument, fmt.Sprintf("Failed to get dsm: %v", err))
}
infos, err := dsm.SnapshotList(lunUuid)
if err != nil {
return nil, status.Errorf(codes.Internal,
fmt.Sprintf("Failed to list snapshot on lun [%s]. err: %v", lunUuid, err))
if k8sVolume.Protocol == utils.ProtocolIscsi {
snapshotSpec := webapi.SnapshotCreateSpec{
Name: spec.SnapshotName,
LunUuid: srcVolId,
Description: spec.Description,
TakenBy: spec.TakenBy,
IsLocked: spec.IsLocked,
}
snapshotUuid, err := dsm.SnapshotCreate(snapshotSpec)
if err != nil {
if err == utils.OutOfFreeSpaceError("") || err == utils.SnapshotReachMaxCountError("") {
return nil,status.Errorf(codes.ResourceExhausted, fmt.Sprintf("Failed to SnapshotCreate(%s), err: %v", srcVolId, err))
}
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to SnapshotCreate(%s), err: %v", srcVolId, err))
}
if snapshot := service.getISCSISnapshot(snapshotUuid); snapshot != nil {
return snapshot, nil
}
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Failed to get iscsi snapshot (%s). Not found", snapshotUuid))
} else if k8sVolume.Protocol == utils.ProtocolSmb {
snapshotSpec := webapi.ShareSnapshotCreateSpec{
ShareName: k8sVolume.Share.Name,
Desc: models.ShareSnapshotDescPrefix + spec.SnapshotName, // limitations: don't change the desc by DSM
IsLocked: spec.IsLocked,
}
snapshotTime, err := dsm.ShareSnapshotCreate(snapshotSpec)
if err != nil {
return nil, status.Errorf(codes.Internal, fmt.Sprintf("Failed to ShareSnapshotCreate(%s), err: %v", srcVolId, err))
}
snapshots := service.listSMBSnapshotsByDsm(dsm)
for _, snapshot := range snapshots {
if snapshot.Time == snapshotTime && snapshot.ParentUuid == srcVolId {
return snapshot, nil
}
}
return nil, status.Errorf(codes.NotFound, fmt.Sprintf("Failed to get smb snapshot (%s, %s). Not found", snapshotTime, srcVolId))
}
return infos, nil
return nil, status.Error(codes.InvalidArgument, "Unsupported volume protocol")
}
func (service *DsmService) GetSnapshot(snapshotUuid string) (webapi.SnapshotInfo, error) {
for _, dsm := range service.dsms {
info, err := dsm.SnapshotGet(snapshotUuid)
// GetSnapshotByUuid looks a snapshot up across all DSMs by its UUID;
// returns nil when no snapshot matches.
func (service *DsmService) GetSnapshotByUuid(snapshotUuid string) *models.K8sSnapshotRespSpec {
	for _, candidate := range service.ListAllSnapshots() {
		if candidate.Uuid == snapshotUuid {
			return candidate
		}
	}
	return nil
}
// DeleteSnapshot removes the snapshot identified by snapshotUuid from its
// owning DSM. The operation is idempotent: a snapshot that is already gone
// (or disappears concurrently with the delete) is treated as success.
// Returns a non-nil error only when the delete fails AND the snapshot is
// confirmed to still exist.
func (service *DsmService) DeleteSnapshot(snapshotUuid string) error {
	snapshot := service.GetSnapshotByUuid(snapshotUuid)
	if snapshot == nil {
		// Already absent — idempotent success.
		return nil
	}
	dsm, err := service.GetDsm(snapshot.DsmIp)
	if err != nil {
		return err
	}
	if snapshot.Protocol == utils.ProtocolSmb {
		// SMB share snapshots are addressed by (time, parent share name).
		if err := dsm.ShareSnapshotDelete(snapshot.Time, snapshot.ParentName); err != nil {
			// Re-check: if the snapshot vanished anyway, swallow the error.
			if snapshot := service.getSMBSnapshot(snapshotUuid); snapshot == nil { // idempotency
				return nil
			}
			log.Errorf("Failed to delete Share snapshot [%s]. err: %v", snapshotUuid, err)
			return err
		}
	} else if snapshot.Protocol == utils.ProtocolIscsi {
		// iSCSI LUN snapshots are addressed directly by UUID.
		if err := dsm.SnapshotDelete(snapshotUuid); err != nil {
			// Re-check: SnapshotGet failing means it is already gone.
			if _, err := dsm.SnapshotGet(snapshotUuid); err != nil { // idempotency
				return nil
			}
			log.Errorf("Failed to delete LUN snapshot [%s]. err: %v", snapshotUuid, err)
			return err
		}
	}
	// NOTE(review): an unknown protocol falls through silently — presumably
	// unreachable given the two supported protocols; confirm upstream.
	return nil
}
func (service *DsmService) listISCSISnapshotsByDsm(dsm *webapi.DSM) (infos []*models.K8sSnapshotRespSpec) {
volumes := service.listISCSIVolumes(dsm.Ip)
for _, volume := range volumes {
lunInfo := volume.Lun
lunSnaps, err := dsm.SnapshotList(lunInfo.Uuid)
if err != nil {
log.Errorf("[%s] Failed to list LUN[%s] snapshots: %v", dsm.Ip, lunInfo.Uuid, err)
continue
}
return info, nil
for _, info := range lunSnaps {
infos = append(infos, DsmLunSnapshotToK8sSnapshot(dsm.Ip, info, lunInfo))
}
}
return
}
func (service *DsmService) ListAllSnapshots() []*models.K8sSnapshotRespSpec {
var allInfos []*models.K8sSnapshotRespSpec
for _, dsm := range service.dsms {
allInfos = append(allInfos, service.listISCSISnapshotsByDsm(dsm)...)
allInfos = append(allInfos, service.listSMBSnapshotsByDsm(dsm)...)
}
return webapi.SnapshotInfo{}, status.Errorf(codes.Internal, fmt.Sprintf("No such snapshot uuid [%s]", snapshotUuid))
return allInfos
}
// ListSnapshots returns every snapshot belonging to the given volume.
// Returns nil when the volume cannot be found, its DSM cannot be reached,
// or the snapshot listing itself fails.
func (service *DsmService) ListSnapshots(volId string) []*models.K8sSnapshotRespSpec {
	k8sVolume := service.GetVolume(volId)
	if k8sVolume == nil {
		return nil
	}
	dsm, err := service.GetDsm(k8sVolume.DsmIp)
	if err != nil {
		log.Errorf("Failed to get DSM[%s]", k8sVolume.DsmIp)
		return nil
	}
	var allInfos []*models.K8sSnapshotRespSpec
	if k8sVolume.Protocol == utils.ProtocolIscsi {
		// iSCSI: snapshots are listed by the LUN uuid (== volume id).
		snaps, listErr := dsm.SnapshotList(volId)
		if listErr != nil {
			log.Errorf("Failed to SnapshotList[%s]", volId)
			return nil
		}
		for _, snap := range snaps {
			allInfos = append(allInfos, DsmLunSnapshotToK8sSnapshot(dsm.Ip, snap, k8sVolume.Lun))
		}
		return allInfos
	}
	// Non-iSCSI: list share snapshots by the backing share's name.
	snaps, listErr := dsm.ShareSnapshotList(k8sVolume.Share.Name)
	if listErr != nil {
		log.Errorf("Failed to ShareSnapshotList[%s]", k8sVolume.Share.Name)
		return nil
	}
	for _, snap := range snaps {
		allInfos = append(allInfos, DsmShareSnapshotToK8sSnapshot(dsm.Ip, snap, k8sVolume.Share))
	}
	return allInfos
}
// DsmShareSnapshotToK8sSnapshot wraps an SMB share snapshot plus its parent
// share into the K8s snapshot response model.
func DsmShareSnapshotToK8sSnapshot(dsmIp string, info webapi.ShareSnapshotInfo, shareInfo webapi.ShareInfo) *models.K8sSnapshotRespSpec {
	// The K8s snapshot name is stored in the description behind a fixed
	// prefix; strip the prefix to recover it.
	k8sName := strings.ReplaceAll(info.Desc, models.ShareSnapshotDescPrefix, "") // snapshot-XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
	snap := models.K8sSnapshotRespSpec{
		DsmIp:       dsmIp,
		Name:        k8sName,
		Uuid:        info.Uuid,
		ParentName:  shareInfo.Name,
		ParentUuid:  shareInfo.Uuid,
		Status:      "Healthy", // share snapshot always Healthy
		SizeInBytes: utils.MBToBytes(shareInfo.QuotaValueInMB), // snapshot quota unavailable; report parent quota instead
		CreateTime:  GMTToUnixSecond(info.Time),
		Time:        info.Time,
		RootPath:    shareInfo.VolPath,
		Protocol:    utils.ProtocolSmb,
	}
	return &snap
}
// DsmLunSnapshotToK8sSnapshot adapts a DSM LUN-snapshot record (plus its
// parent LUN) into the driver's protocol-neutral snapshot response shape.
func DsmLunSnapshotToK8sSnapshot(dsmIp string, info webapi.SnapshotInfo, lunInfo webapi.LunInfo) *models.K8sSnapshotRespSpec {
	resp := &models.K8sSnapshotRespSpec{}
	resp.DsmIp = dsmIp
	resp.Name = info.Name
	resp.Uuid = info.Uuid
	resp.ParentName = lunInfo.Name // it can be empty for iscsi
	resp.ParentUuid = info.ParentUuid
	resp.Status = info.Status
	resp.SizeInBytes = info.TotalSize
	resp.CreateTime = info.CreateTime
	resp.Time = "" // GMT time string is only meaningful for share snapshots
	resp.RootPath = info.RootPath
	resp.Protocol = utils.ProtocolIscsi
	return resp
}
// getISCSISnapshot scans every managed DSM for a LUN snapshot whose UUID
// equals snapshotUuid. Returns nil when no DSM owns such a snapshot.
func (service *DsmService) getISCSISnapshot(snapshotUuid string) *models.K8sSnapshotRespSpec {
	for _, dsm := range service.dsms {
		for _, candidate := range service.listISCSISnapshotsByDsm(dsm) {
			if candidate.Uuid == snapshotUuid {
				return candidate
			}
		}
	}
	return nil
}

View File

@@ -0,0 +1,231 @@
/*
* Copyright 2022 Synology Inc.
*/
package service
import (
"errors"
"fmt"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"strings"
"time"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
"github.com/SynologyOpenSource/synology-csi/pkg/models"
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
)
// GMTToUnixSecond converts a DSM snapshot time string (e.g.
// "GMT+08-2022.01.14-19.18.29") into a Unix timestamp in seconds.
// It logs the parse error and returns -1 when the string does not match
// the expected layout.
func GMTToUnixSecond(timeStr string) (int64) {
	// Go reference-time layout matching DSM's "GMT<tz>-<date>-<time>" format.
	const dsmTimeLayout = "GMT-07-2006.01.02-15.04.05"

	parsed, parseErr := time.Parse(dsmTimeLayout, timeStr)
	if parseErr != nil {
		log.Error(parseErr)
		return -1
	}

	return parsed.Unix()
}
// createSMBVolumeBySnapshot clones a new SMB share from an existing share
// snapshot and returns the resulting volume description.
//
// Fix: the original wrapped messages with status.Errorf(code, fmt.Sprintf(...))
// and status.Errorf(code, msg) — a non-constant format string. Any '%' in the
// interpolated error text would corrupt the message (go vet printf). Use
// status.Errorf with the format directly, and status.Error for a prebuilt msg.
func (service *DsmService) createSMBVolumeBySnapshot(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcSnapshot *models.K8sSnapshotRespSpec) (*models.K8sVolumeRespSpec, error) {
	srcShareInfo, err := dsm.ShareGet(srcSnapshot.ParentName)
	if err != nil {
		return nil, status.Errorf(codes.Internal, "Failed to get share: %s, err: %v", srcSnapshot.ParentName, err)
	}

	shareCloneSpec := webapi.ShareCloneSpec{
		Name:     spec.ShareName,
		Snapshot: srcSnapshot.Time,
		ShareInfo: webapi.ShareInfo{
			Name:                spec.ShareName,
			VolPath:             srcSnapshot.RootPath,
			Desc:                "Cloned from [" + srcSnapshot.Time + "] by csi driver", // max: 64
			EnableRecycleBin:    srcShareInfo.EnableRecycleBin,
			RecycleBinAdminOnly: srcShareInfo.RecycleBinAdminOnly,
			NameOrg:             srcSnapshot.ParentName,
		},
	}

	// AlreadyExist is tolerated so that retried CSI CreateVolume calls are idempotent.
	if _, err := dsm.ShareClone(shareCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
		return nil,
			status.Errorf(codes.Internal, "Failed to create volume with source volume ID: %s, err: %v", srcShareInfo.Uuid, err)
	}

	shareInfo, err := dsm.ShareGet(spec.ShareName)
	if err != nil {
		return nil,
			status.Errorf(codes.Internal, "Failed to get existed Share with name: [%s], err: %v", spec.ShareName, err)
	}

	newSizeInMB := utils.BytesToMBCeil(spec.Size)
	if shareInfo.QuotaValueInMB == 0 {
		// known issue for some DS, manually set quota to the new share
		if err := dsm.SetShareQuota(shareInfo, newSizeInMB); err != nil {
			msg := fmt.Sprintf("Failed to set quota [%d] to Share [%s], err: %v", newSizeInMB, shareInfo.Name, err)
			log.Error(msg)
			return nil, status.Error(codes.Internal, msg)
		}
		shareInfo.QuotaValueInMB = newSizeInMB
	}

	if newSizeInMB != shareInfo.QuotaValueInMB {
		// FIXME: need to delete share
		return nil,
			status.Errorf(codes.OutOfRange, "Requested share quotaMB [%d] is not equal to snapshot restore quotaMB [%d]", newSizeInMB, shareInfo.QuotaValueInMB)
	}

	log.Debugf("[%s] createSMBVolumeBySnapshot Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid)
	return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
}
// createSMBVolumeByVolume clones a new SMB share from an existing share
// (volume cloning). The requested size, if set, must equal the source share
// quota because the clone inherits it.
//
// Fix: replaced status.Errorf(code, fmt.Sprintf(...)) / status.Errorf(code, msg)
// with direct formatting and status.Error — non-constant format strings break
// when the embedded error text contains '%' (go vet printf).
func (service *DsmService) createSMBVolumeByVolume(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec, srcShareInfo webapi.ShareInfo) (*models.K8sVolumeRespSpec, error) {
	newSizeInMB := utils.BytesToMBCeil(spec.Size)
	if spec.Size != 0 && newSizeInMB != srcShareInfo.QuotaValueInMB {
		return nil,
			status.Errorf(codes.OutOfRange, "Requested share quotaMB [%d] is not equal to src share quotaMB [%d]", newSizeInMB, srcShareInfo.QuotaValueInMB)
	}

	shareCloneSpec := webapi.ShareCloneSpec{
		Name:     spec.ShareName,
		Snapshot: "", // empty snapshot => clone from the live share
		ShareInfo: webapi.ShareInfo{
			Name:                spec.ShareName,
			VolPath:             srcShareInfo.VolPath,                                   // must be same with srcShare location
			Desc:                "Cloned from [" + srcShareInfo.Name + "] by csi driver", // max: 64
			EnableRecycleBin:    srcShareInfo.EnableRecycleBin,
			RecycleBinAdminOnly: srcShareInfo.RecycleBinAdminOnly,
			NameOrg:             srcShareInfo.Name,
		},
	}

	// AlreadyExist is tolerated so that retried CSI CreateVolume calls are idempotent.
	if _, err := dsm.ShareClone(shareCloneSpec); err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
		return nil,
			status.Errorf(codes.Internal, "Failed to create volume with source volume ID: %s, err: %v", srcShareInfo.Uuid, err)
	}

	shareInfo, err := dsm.ShareGet(spec.ShareName)
	if err != nil {
		return nil,
			status.Errorf(codes.Internal, "Failed to get existed Share with name: [%s], err: %v", spec.ShareName, err)
	}

	if shareInfo.QuotaValueInMB == 0 {
		// known issue for some DS, manually set quota to the new share
		if err := dsm.SetShareQuota(shareInfo, newSizeInMB); err != nil {
			msg := fmt.Sprintf("Failed to set quota [%d] to Share [%s], err: %v", newSizeInMB, shareInfo.Name, err)
			log.Error(msg)
			return nil, status.Error(codes.Internal, msg)
		}
		shareInfo.QuotaValueInMB = newSizeInMB
	}

	log.Debugf("[%s] createSMBVolumeByVolume Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid)
	return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
}
// createSMBVolumeByDsm creates a brand-new SMB share for a CSI volume on the
// given DSM: pick (or validate) a volume location, create the share with the
// requested quota, then read the share back to return its description.
//
// Fix: replaced status.Errorf(code, fmt.Sprintf(...)) with direct formatting —
// non-constant format strings misformat when the arguments contain '%'
// (go vet printf).
func (service *DsmService) createSMBVolumeByDsm(dsm *webapi.DSM, spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error) {
	// TODO: Check if share name is allowable
	// 1. Find a available location
	if spec.Location == "" {
		vol, err := service.getFirstAvailableVolume(dsm, spec.Size)
		if err != nil {
			return nil, status.Errorf(codes.Internal, "Failed to get available location, err: %v", err)
		}
		spec.Location = vol.Path
	}

	// 2. Check if location exists
	_, err := dsm.VolumeGet(spec.Location)
	if err != nil {
		return nil, status.Errorf(codes.InvalidArgument, "Unable to find location %s", spec.Location)
	}

	// 3. Create Share
	sizeInMB := utils.BytesToMBCeil(spec.Size)
	shareSpec := webapi.ShareCreateSpec{
		Name: spec.ShareName,
		ShareInfo: webapi.ShareInfo{
			Name:                spec.ShareName,
			VolPath:             spec.Location,
			Desc:                "Created by Synology K8s CSI",
			EnableShareCow:      false,
			EnableRecycleBin:    true,
			RecycleBinAdminOnly: true,
			Encryption:          0,
			QuotaForCreate:      &sizeInMB,
		},
	}

	log.Debugf("ShareCreate spec: %v", shareSpec)
	// AlreadyExist is tolerated so that retried CSI CreateVolume calls are idempotent.
	err = dsm.ShareCreate(shareSpec)
	if err != nil && !errors.Is(err, utils.AlreadyExistError("")) {
		return nil, status.Errorf(codes.Internal, "Failed to create share, err: %v", err)
	}

	shareInfo, err := dsm.ShareGet(spec.ShareName)
	if err != nil {
		return nil,
			status.Errorf(codes.Internal, "Failed to get existed Share with name: %s, err: %v", spec.ShareName, err)
	}

	log.Debugf("[%s] createSMBVolumeByDsm Successfully. VolumeId: %s", dsm.Ip, shareInfo.Uuid)
	return DsmShareToK8sVolume(dsm.Ip, shareInfo), nil
}
// listSMBVolumes returns the CSI-managed SMB shares of one DSM (matched by
// IP) or, when dsmIp is empty, of every managed DSM. Listing failures are
// logged and the affected DSM is skipped.
func (service *DsmService) listSMBVolumes(dsmIp string) (infos []*models.K8sVolumeRespSpec) {
	for _, dsm := range service.dsms {
		// Honor the optional per-DSM filter.
		if dsmIp != "" && dsm.Ip != dsmIp {
			continue
		}

		shares, listErr := dsm.ShareList()
		if listErr != nil {
			log.Errorf("[%s] Failed to list shares: %v", dsm.Ip, listErr)
			continue
		}

		for _, share := range shares {
			// Only shares created by this driver carry the CSI prefix.
			if strings.HasPrefix(share.Name, models.SharePrefix) {
				infos = append(infos, DsmShareToK8sVolume(dsm.Ip, share))
			}
		}
	}
	return infos
}
// listSMBSnapshotsByDsm lists the share snapshots of every CSI-managed SMB
// volume on one DSM. Per-share listing failures are logged and skipped.
func (service *DsmService) listSMBSnapshotsByDsm(dsm *webapi.DSM) (infos []*models.K8sSnapshotRespSpec) {
	for _, volume := range service.listSMBVolumes(dsm.Ip) {
		share := volume.Share

		snaps, listErr := dsm.ShareSnapshotList(share.Name)
		if listErr != nil {
			log.Errorf("[%s] Failed to list share snapshots: %v", dsm.Ip, listErr)
			continue
		}

		for _, snap := range snaps {
			infos = append(infos, DsmShareSnapshotToK8sSnapshot(dsm.Ip, snap, share))
		}
	}
	return infos
}
// getSMBSnapshot scans every managed DSM for a share snapshot whose UUID
// equals snapshotUuid. Returns nil when no DSM owns such a snapshot.
func (service *DsmService) getSMBSnapshot(snapshotUuid string) *models.K8sSnapshotRespSpec {
	for _, dsm := range service.dsms {
		for _, candidate := range service.listSMBSnapshotsByDsm(dsm) {
			if candidate.Uuid == snapshotUuid {
				return candidate
			}
		}
	}
	return nil
}

View File

@@ -58,6 +58,7 @@ type SnapshotInfo struct {
Status string `json:"status"`
TotalSize int64 `json:"total_size"`
CreateTime int64 `json:"create_time"`
RootPath string `json:"root_path"`
}
type LunDevAttrib struct {
@@ -107,6 +108,8 @@ func errCodeMapping(errCode int, oriErr error) error {
switch errCode {
case 18990002: // Out of free space
return utils.OutOfFreeSpaceError("")
case 18990531: // No such LUN
return utils.NoSuchLunError("")
case 18990538: // Duplicated LUN name
return utils.AlreadyExistError("")
case 18990541:
@@ -124,7 +127,7 @@ func errCodeMapping(errCode int, oriErr error) error {
}
if errCode > 18990000 {
return utils.IscsiDefaultError("")
return utils.IscsiDefaultError{errCode}
}
return oriErr
}
@@ -143,7 +146,7 @@ func (dsm *DSM) LunList() ([]LunInfo, error) {
resp, err := dsm.sendRequest("", &LunInfos{}, params, "webapi/entry.cgi")
if err != nil {
return nil, err
return nil, errCodeMapping(resp.ErrorCode, err)
}
lunInfos, ok := resp.Data.(*LunInfos)
@@ -174,9 +177,8 @@ func (dsm *DSM) LunCreate(spec LunCreateSpec) (string, error) {
}
resp, err := dsm.sendRequest("", &LunCreateResp{}, params, "webapi/entry.cgi")
err = errCodeMapping(resp.ErrorCode, err)
if err != nil {
return "", err
return "", errCodeMapping(resp.ErrorCode, err)
}
lunResp, ok := resp.Data.(*LunCreateResp)
@@ -195,9 +197,9 @@ func (dsm *DSM) LunUpdate(spec LunUpdateSpec) error {
params.Add("uuid", strconv.Quote(spec.Uuid))
params.Add("new_size", strconv.FormatInt(int64(spec.NewSize), 10))
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
if err != nil {
return err
return errCodeMapping(resp.ErrorCode, err)
}
return nil
@@ -216,9 +218,9 @@ func (dsm *DSM) LunGet(uuid string) (LunInfo, error) {
}
info := Info{}
_, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
if err != nil {
return LunInfo{}, err
return LunInfo{}, errCodeMapping(resp.ErrorCode, err)
}
return info.Lun, nil
@@ -238,9 +240,8 @@ func (dsm *DSM) LunClone(spec LunCloneSpec) (string, error) {
}
resp, err := dsm.sendRequest("", &LunCloneResp{}, params, "webapi/entry.cgi")
err = errCodeMapping(resp.ErrorCode, err)
if err != nil {
return "", err
return "", errCodeMapping(resp.ErrorCode, err)
}
cloneLunResp, ok := resp.Data.(*LunCloneResp)
@@ -264,7 +265,7 @@ func (dsm *DSM) TargetList() ([]TargetInfo, error) {
resp, err := dsm.sendRequest("", &TargetInfos{}, params, "webapi/entry.cgi")
if err != nil {
return nil, err
return nil, errCodeMapping(resp.ErrorCode, err)
}
trgInfos, ok := resp.Data.(*TargetInfos)
@@ -287,9 +288,9 @@ func (dsm *DSM) TargetGet(targetId string) (TargetInfo, error) {
}
info := Info{}
_, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
if err != nil {
return TargetInfo{}, err
return TargetInfo{}, errCodeMapping(resp.ErrorCode, err)
}
return info.Target, nil
@@ -304,9 +305,9 @@ func (dsm *DSM) TargetSet(targetId string, maxSession int) error {
params.Add("target_id", strconv.Quote(targetId))
params.Add("max_sessions", strconv.Itoa(maxSession))
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
if err != nil {
return err
return errCodeMapping(resp.ErrorCode, err)
}
return nil
@@ -326,9 +327,8 @@ func (dsm *DSM) TargetCreate(spec TargetCreateSpec) (string, error) {
}
resp, err := dsm.sendRequest("", &TrgCreateResp{}, params, "webapi/entry.cgi")
err = errCodeMapping(resp.ErrorCode, err)
if err != nil {
return "", err
return "", errCodeMapping(resp.ErrorCode, err)
}
trgResp, ok := resp.Data.(*TrgCreateResp)
@@ -351,9 +351,9 @@ func (dsm *DSM) LunMapTarget(targetIds []string, lunUuid string) error {
log.Debugln(params)
}
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
if err != nil {
return err
return errCodeMapping(resp.ErrorCode, err)
}
return nil
}
@@ -365,9 +365,9 @@ func (dsm *DSM) LunDelete(lunUuid string) error {
params.Add("version", "1")
params.Add("uuid", strconv.Quote(lunUuid))
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
if err != nil {
return err
return errCodeMapping(resp.ErrorCode, err)
}
return nil
}
@@ -379,9 +379,9 @@ func (dsm *DSM) TargetDelete(targetName string) error {
params.Add("version", "1")
params.Add("target_id", strconv.Quote(targetName))
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
if err != nil {
return err
return errCodeMapping(resp.ErrorCode, err)
}
return nil
}

View File

@@ -7,21 +7,109 @@ import (
"fmt"
"net/url"
"strconv"
log "github.com/sirupsen/logrus"
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
"github.com/SynologyOpenSource/synology-csi/pkg/logger"
)
type ShareInfo struct {
Name string `json:"name"`
VolPath string `json:"vol_path"`
Name string `json:"name"` // required
VolPath string `json:"vol_path"` // required
Desc string `json:"desc"`
EnableShareCow bool `json:"enable_share_cow"`
EnableShareCow bool `json:"enable_share_cow"` // field for create
EnableRecycleBin bool `json:"enable_recycle_bin"`
RecycleBinAdminOnly bool `json:"recycle_bin_admin_only"`
Encryption int `json:"encryption"`
Encryption int `json:"encryption"` // field for create
QuotaForCreate *int64 `json:"share_quota,omitempty"`
QuotaValueInMB int64 `json:"quota_value"` // field for get
SupportSnapshot bool `json:"support_snapshot"` // field for get
Uuid string `json:"uuid"` // field for get
NameOrg string `json:"name_org"` // required for clone
}
type ShareUpdateInfo struct {
Name string `json:"name"` // required
VolPath string `json:"vol_path"` // required
QuotaForCreate *int64 `json:"share_quota,omitempty"`
// Add properties you want to update to shares here
}
type ShareSnapshotInfo struct {
Uuid string `json:"ruuid"`
Time string `json:"time"`
Desc string `json:"desc"`
SnapSize string `json:"snap_size"` // the complete size of the snapshot
Lock bool `json:"lock"`
ScheduleSnapshot bool `json:"schedule_snapshot"`
}
type ShareCreateSpec struct {
Name string `json:"name"`
ShareInfo ShareInfo `json:"shareinfo"`
Name string
ShareInfo ShareInfo
}
type ShareCloneSpec struct {
Name string
Snapshot string
ShareInfo ShareInfo
}
type ShareSnapshotCreateSpec struct {
ShareName string
Desc string
IsLocked bool
}
type SharePermissionSetSpec struct {
Name string
UserGroupType string // "local_user"/"local_group"/"system"
Permissions []*SharePermission
}
type SharePermission struct {
Name string `json:"name"`
IsReadonly bool `json:"is_readonly"`
IsWritable bool `json:"is_writable"`
IsDeny bool `json:"is_deny"`
IsCustom bool `json:"is_custom,omitempty"`
IsAdmin bool `json:"is_admin,omitempty"` // field for list
}
// shareErrCodeMapping translates a SYNO.Core.Share webapi error code into a
// typed error from pkg/utils so callers can match with errors.Is. Codes
// without a dedicated type map to ShareDefaultError (>= 3300); anything else
// falls through to the original transport error (nil on success).
func shareErrCodeMapping(errCode int, oriErr error) error {
	switch errCode {
	case 402: // No such share
		return utils.NoSuchShareError("")
	case 403: // Invalid input value
		return utils.BadParametersError("")
	case 3301: // already exists
		return utils.AlreadyExistError("")
	case 3309: // share count reached the system limit
		return utils.ShareReachMaxCountError("")
	case 3328: // share service temporarily busy
		return utils.ShareSystemBusyError("")
	}
	// Any other share-range code: keep the raw code inside the error.
	if errCode >= 3300 {
		return utils.ShareDefaultError{errCode}
	}
	return oriErr
}
// ----------------------- Share APIs -----------------------
// ShareGet fetches a single share by name via SYNO.Core.Share "get",
// requesting the additional fields the CSI driver relies on (encryption,
// share cow, recycle bin, snapshot support, quota).
// NOTE(review): assumes sendRequest returns a usable resp (ErrorCode
// readable) even when err != nil — verify in sendRequest.
func (dsm *DSM) ShareGet(shareName string) (ShareInfo, error) {
	params := url.Values{}
	params.Add("api", "SYNO.Core.Share")
	params.Add("method", "get")
	params.Add("version", "1")
	params.Add("additional", "[\"encryption\", \"enable_share_cow\", \"recyclebin\", \"support_snapshot\", \"share_quota\"]")
	params.Add("name", strconv.Quote(shareName))

	info := ShareInfo{}
	resp, err := dsm.sendRequest("", &info, params, "webapi/entry.cgi")
	// On success ErrorCode is zero and the mapping returns err (nil) unchanged.
	return info, shareErrCodeMapping(resp.ErrorCode, err)
}
func (dsm *DSM) ShareList() ([]ShareInfo, error) {
@@ -29,7 +117,7 @@ func (dsm *DSM) ShareList() ([]ShareInfo, error) {
params.Add("api", "SYNO.Core.Share")
params.Add("method", "list")
params.Add("version", "1")
params.Add("additional", "[\"encryption\"]")
params.Add("additional", "[\"encryption\", \"enable_share_cow\", \"recyclebin\", \"support_snapshot\", \"share_quota\"]")
type ShareInfos struct {
Shares []ShareInfo `json:"shares"`
@@ -37,7 +125,7 @@ func (dsm *DSM) ShareList() ([]ShareInfo, error) {
resp, err := dsm.sendRequest("", &ShareInfos{}, params, "webapi/entry.cgi")
if err != nil {
return nil, err
return nil, shareErrCodeMapping(resp.ErrorCode, err)
}
infos, ok := resp.Data.(*ShareInfos)
@@ -61,13 +149,49 @@ func (dsm *DSM) ShareCreate(spec ShareCreateSpec) error {
}
params.Add("shareinfo", string(js))
// response : {"data":{"name":"Share-2"},"success":true}
_, err = dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
if err != nil {
return err
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
return shareErrCodeMapping(resp.ErrorCode, err)
}
func (dsm *DSM) ShareClone(spec ShareCloneSpec) (string, error) {
params := url.Values{}
params.Add("api", "SYNO.Core.Share")
params.Add("method", "clone")
params.Add("version", "1")
params.Add("name", strconv.Quote(spec.Name))
// if clone from snapshot, the NameOrg must be the parent of the snapshot, or the webapi will return 3300
// if the snapshot doesn't exist, it will return 3300 too.
if spec.ShareInfo.NameOrg == "" {
return "", fmt.Errorf("Clone failed. The source name can't be empty.")
}
return nil
if spec.Snapshot != "" {
params.Add("snapshot", strconv.Quote(spec.Snapshot))
}
js, err := json.Marshal(spec.ShareInfo)
if err != nil {
return "", err
}
params.Add("shareinfo", string(js))
type ShareCreateResp struct {
Name string `json:"name"`
}
resp, err := dsm.sendRequest("", &ShareCreateResp{}, params, "webapi/entry.cgi")
if err != nil {
return "", shareErrCodeMapping(resp.ErrorCode, err)
}
shareResp, ok := resp.Data.(*ShareCreateResp)
if !ok {
return "", fmt.Errorf("Failed to assert response to %T", &ShareCreateResp{})
}
return shareResp.Name, nil
}
func (dsm *DSM) ShareDelete(shareName string) error {
@@ -75,13 +199,165 @@ func (dsm *DSM) ShareDelete(shareName string) error {
params.Add("api", "SYNO.Core.Share")
params.Add("method", "delete")
params.Add("version", "1")
params.Add("name", fmt.Sprintf("[%s]", strconv.Quote(shareName)))
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
return shareErrCodeMapping(resp.ErrorCode, err)
}
func (dsm *DSM) ShareSet(shareName string, updateInfo ShareUpdateInfo) error {
params := url.Values{}
params.Add("api", "SYNO.Core.Share")
params.Add("method", "set")
params.Add("version", "1")
params.Add("name", strconv.Quote(shareName))
// response : {"success":true}
_, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
js, err := json.Marshal(updateInfo)
if err != nil {
return err
}
params.Add("shareinfo", string(js))
if logger.WebapiDebug {
log.Debugln(params)
}
resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
return shareErrCodeMapping(resp.ErrorCode, err)
}
// SetShareQuota updates the quota of an existing share to newSizeInMB,
// leaving the other share properties untouched (only the fields present in
// ShareUpdateInfo are sent to the "set" webapi).
func (dsm *DSM) SetShareQuota(shareInfo ShareInfo, newSizeInMB int64) error {
	updateInfo := ShareUpdateInfo{
		Name:           shareInfo.Name,
		VolPath:        shareInfo.VolPath,
		QuotaForCreate: &newSizeInMB,
	}

	return dsm.ShareSet(shareInfo.Name, updateInfo)
}
// ----------------------- Share Snapshot APIs -----------------------
// ShareSnapshotCreate takes a snapshot of a share via SYNO.Core.Share.Snapshot
// "create" and returns the snapshot identifier, a GMT time string such as
// "GMT+08-2022.01.14-19.18.29".
func (dsm *DSM) ShareSnapshotCreate(spec ShareSnapshotCreateSpec) (string, error) {
	params := url.Values{}
	params.Add("api", "SYNO.Core.Share.Snapshot")
	params.Add("method", "create")
	params.Add("version", "1")
	params.Add("name", strconv.Quote(spec.ShareName))

	// JSON payload for the "snapinfo" request parameter.
	type SnapInfo struct {
		Desc     string `json:"desc"`
		IsLocked bool   `json:"lock"` // default true
	}

	snapinfo := SnapInfo{
		Desc:     spec.Desc,
		IsLocked: spec.IsLocked,
	}

	js, err := json.Marshal(snapinfo)
	if err != nil {
		return "", err
	}
	params.Add("snapinfo", string(js))

	// The response body decodes directly into the snapshot time string.
	var snapTime string
	resp, err := dsm.sendRequest("", &snapTime, params, "webapi/entry.cgi")
	if err != nil {
		return "", shareErrCodeMapping(resp.ErrorCode, err)
	}

	return snapTime, nil // "GMT+08-2022.01.14-19.18.29"
}
// ShareSnapshotList lists the snapshots of the named share, including the
// additional fields needed by the driver (desc, lock, schedule_snapshot,
// ruuid, snap_size).
func (dsm *DSM) ShareSnapshotList(name string) ([]ShareSnapshotInfo, error) {
	params := url.Values{}
	params.Add("api", "SYNO.Core.Share.Snapshot")
	params.Add("method", "list")
	params.Add("version", "2")
	params.Add("name", strconv.Quote(name))
	params.Add("additional", "[\"desc\", \"lock\", \"schedule_snapshot\", \"ruuid\", \"snap_size\"]")

	// Shape of the "list" response payload.
	type Infos struct {
		Snapshots []ShareSnapshotInfo `json:"snapshots"`
		Total     int                 `json:"total"`
	}

	resp, err := dsm.sendRequest("", &Infos{}, params, "webapi/entry.cgi")
	if err != nil {
		return nil, shareErrCodeMapping(resp.ErrorCode, err)
	}

	infos, ok := resp.Data.(*Infos)
	if !ok {
		return nil, fmt.Errorf("Failed to assert response to %T", &Infos{})
	}

	return infos.Snapshots, nil
}
// ShareSnapshotDelete removes one snapshot — identified by its GMT time
// string, e.g. "GMT+08-2022.01.14-19.18.29" — from the named share.
// A non-empty response array signals a per-snapshot API error.
func (dsm *DSM) ShareSnapshotDelete(snapTime string, shareName string) error {
	params := url.Values{}
	params.Add("api", "SYNO.Core.Share.Snapshot")
	params.Add("method", "delete")
	params.Add("version", "1")
	params.Add("name", strconv.Quote(shareName))
	params.Add("snapshots", fmt.Sprintf("[%s]", strconv.Quote(snapTime))) // ["GMT+08-2022.01.14-19.18.29"]

	var objmap []map[string]interface{}
	resp, err := dsm.sendRequest("", &objmap, params, "webapi/entry.cgi")
	if err != nil {
		return shareErrCodeMapping(resp.ErrorCode, err)
	}

	// The API reports per-item failures as elements of the response array.
	if len(objmap) > 0 {
		return fmt.Errorf("Failed to delete snapshot, API common error. snapshot: %s", snapTime)
	}

	return nil
}
// ----------------------- Share Permission APIs -----------------------
// SharePermissionSet replaces the permission entries of a share for one
// user-group type ("local_user", "local_group" or "system").
func (dsm *DSM) SharePermissionSet(spec SharePermissionSetSpec) error {
	params := url.Values{}
	params.Add("api", "SYNO.Core.Share.Permission")
	params.Add("method", "set")
	params.Add("version", "1")
	params.Add("name", strconv.Quote(spec.Name))
	params.Add("user_group_type", strconv.Quote(spec.UserGroupType))

	js, err := json.Marshal(spec.Permissions)
	if err != nil {
		return err
	}
	params.Add("permissions", string(js))

	resp, err := dsm.sendRequest("", &struct{}{}, params, "webapi/entry.cgi")
	// Maps the webapi error code to a typed error; nil on success.
	return shareErrCodeMapping(resp.ErrorCode, err)
}
// SharePermissionList returns the permission entries of a share for the given
// user-group type ("local_user", "local_group" or "system").
func (dsm *DSM) SharePermissionList(shareName string, userGroupType string) ([]SharePermission, error) {
	params := url.Values{}
	params.Add("api", "SYNO.Core.Share.Permission")
	params.Add("method", "list")
	params.Add("version", "1")
	params.Add("name", strconv.Quote(shareName))
	params.Add("user_group_type", strconv.Quote(userGroupType))

	// Shape of the "list" response payload.
	type SharePermissions struct {
		Permissions []SharePermission `json:"items"`
	}

	resp, err := dsm.sendRequest("", &SharePermissions{}, params, "webapi/entry.cgi")
	if err != nil {
		return nil, shareErrCodeMapping(resp.ErrorCode, err)
	}

	infos, ok := resp.Data.(*SharePermissions)
	if !ok {
		return nil, fmt.Errorf("Failed to assert response to %T", &SharePermissions{})
	}

	return infos.Permissions, nil
}

View File

@@ -16,14 +16,15 @@ type IDsmService interface {
GetDsm(ip string) (*webapi.DSM, error)
GetDsmsCount() int
ListDsmVolumes(ip string) ([]webapi.VolInfo, error)
CreateVolume(spec *models.CreateK8sVolumeSpec) (webapi.LunInfo, string, error)
DeleteVolume(volumeId string) error
ListVolumes() []*models.ListK8sVolumeRespSpec
GetVolume(lunUuid string) *models.ListK8sVolumeRespSpec
ExpandLun(lunUuid string, newSize int64) error
CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (string, error)
CreateVolume(spec *models.CreateK8sVolumeSpec) (*models.K8sVolumeRespSpec, error)
DeleteVolume(volId string) error
ListVolumes() []*models.K8sVolumeRespSpec
GetVolume(volId string) *models.K8sVolumeRespSpec
ExpandVolume(volId string, newSize int64) (*models.K8sVolumeRespSpec, error)
CreateSnapshot(spec *models.CreateK8sVolumeSnapshotSpec) (*models.K8sSnapshotRespSpec, error)
DeleteSnapshot(snapshotUuid string) error
ListAllSnapshots() ([]webapi.SnapshotInfo, error)
ListSnapshots(lunUuid string) ([]webapi.SnapshotInfo, error)
GetSnapshot(snapshotUuid string) (webapi.SnapshotInfo, error)
ListAllSnapshots() []*models.K8sSnapshotRespSpec
ListSnapshots(volId string) []*models.K8sSnapshotRespSpec
GetVolumeByName(volName string) *models.K8sVolumeRespSpec
GetSnapshotByName(snapshotName string) *models.K8sSnapshotRespSpec
}

View File

@@ -2,6 +2,10 @@
package models
import (
"fmt"
)
const (
K8sCsiName = "Kubernetes CSI"
@@ -15,8 +19,30 @@ const (
LunTypeBlunThick = "BLUN_THICK" // thick provision, mapped to type 259
MaxIqnLen = 128
// Share definitions
MaxShareLen = 32
MaxShareDescLen = 64
UserGroupTypeLocalUser = "local_user"
UserGroupTypeLocalGroup = "local_group"
UserGroupTypeSystem = "system"
// CSI definitions
TargetPrefix = "k8s-csi"
LunPrefix = "k8s-csi"
IqnPrefix = "iqn.2000-01.com.synology:"
)
TargetPrefix = "k8s-csi"
LunPrefix = "k8s-csi"
IqnPrefix = "iqn.2000-01.com.synology:"
SharePrefix = "k8s-csi"
ShareSnapshotDescPrefix = "(Do not change)"
)
// GenLunName builds the DSM LUN name for a k8s volume: "<LunPrefix>-<volName>".
func GenLunName(volName string) string {
	return fmt.Sprintf("%s-%s", LunPrefix, volName)
}
// GenShareName builds the DSM share name for a k8s volume
// ("<SharePrefix>-<volName>") and truncates it to MaxShareLen bytes to fit
// DSM's share-name length limit.
func GenShareName(volName string) string {
	shareName := fmt.Sprintf("%s-%s", SharePrefix, volName)
	if len(shareName) > MaxShareLen {
		return shareName[:MaxShareLen]
	}
	return shareName
}

View File

@@ -3,6 +3,8 @@
package models
import (
"fmt"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
)
@@ -10,21 +12,43 @@ type CreateK8sVolumeSpec struct {
DsmIp string
K8sVolumeName string
LunName string
ShareName string
Location string
Size int64
Type string
ThinProvisioning bool
TargetName string
TargetIqn string
MultipleSession bool
SourceSnapshotId string
SourceVolumeId string
Protocol string
}
type ListK8sVolumeRespSpec struct {
DsmIp string
Lun webapi.LunInfo
Target webapi.TargetInfo
type K8sVolumeRespSpec struct {
DsmIp string
VolumeId string
SizeInBytes int64
Location string
Name string
Source string
Lun webapi.LunInfo
Target webapi.TargetInfo
Share webapi.ShareInfo
Protocol string
}
type K8sSnapshotRespSpec struct {
DsmIp string
Name string
Uuid string
ParentName string
ParentUuid string
Status string
SizeInBytes int64
CreateTime int64
Time string // only for share snapshot delete
RootPath string
Protocol string
}
type CreateK8sVolumeSnapshotSpec struct {
@@ -33,4 +57,24 @@ type CreateK8sVolumeSnapshotSpec struct {
Description string
TakenBy string
IsLocked bool
}
}
// NodeStageVolumeSpec carries the parameters of a CSI NodeStageVolume call.
type NodeStageVolumeSpec struct {
	VolumeId          string
	StagingTargetPath string
	VolumeCapability  *csi.VolumeCapability
	Dsm               string // NOTE(review): presumably the owning DSM host/IP — confirm at call sites
	Source            string
}
// ByVolumeId implements sort.Interface, ordering volume responses by VolumeId.
type ByVolumeId []*K8sVolumeRespSpec

func (a ByVolumeId) Len() int           { return len(a) }
func (a ByVolumeId) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a ByVolumeId) Less(i, j int) bool { return a[i].VolumeId < a[j].VolumeId }
// BySnapshotAndParentUuid implements sort.Interface, ordering snapshots by the
// composite key "<ParentUuid>/<Uuid>" so siblings group under their parent.
type BySnapshotAndParentUuid []*K8sSnapshotRespSpec

func (a BySnapshotAndParentUuid) Len() int      { return len(a) }
func (a BySnapshotAndParentUuid) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a BySnapshotAndParentUuid) Less(i, j int) bool {
	return fmt.Sprintf("%s/%s", a[i].ParentUuid, a[i].Uuid) < fmt.Sprintf("%s/%s", a[j].ParentUuid, a[j].Uuid)
}

View File

@@ -2,14 +2,28 @@
package utils
import (
"fmt"
)
type OutOfFreeSpaceError string
type AlreadyExistError string
type BadParametersError string
type NoSuchLunError string
type LunReachMaxCountError string
type TargetReachMaxCountError string
type NoSuchSnapshotError string
type BadLunTypeError string
type SnapshotReachMaxCountError string
type IscsiDefaultError string
type IscsiDefaultError struct {
ErrCode int
}
type NoSuchShareError string
type ShareReachMaxCountError string
type ShareSystemBusyError string
type ShareDefaultError struct {
ErrCode int
}
func (_ OutOfFreeSpaceError) Error() string {
return "Out of free space"
@@ -17,6 +31,14 @@ func (_ OutOfFreeSpaceError) Error() string {
func (_ AlreadyExistError) Error() string {
return "Already Existed"
}
func (_ BadParametersError) Error() string {
return "Invalid input value"
}
// ISCSI errors
func (_ NoSuchLunError) Error() string {
return "No such LUN"
}
func (_ LunReachMaxCountError) Error() string {
return "Number of LUN reach limit"
@@ -38,6 +60,23 @@ func (_ SnapshotReachMaxCountError) Error() string {
return "Number of snapshot reach limit"
}
func (_ IscsiDefaultError) Error() string {
return "Default ISCSI error"
}
func (e IscsiDefaultError) Error() string {
return fmt.Sprintf("ISCSI API error. Error code: %d", e.ErrCode)
}
// Share errors
func (_ NoSuchShareError) Error() string {
return "No such share"
}
func (_ ShareReachMaxCountError) Error() string {
return "Number of share reach limit"
}
func (_ ShareSystemBusyError) Error() string {
return "Share system is temporary busy"
}
func (e ShareDefaultError) Error() string {
return fmt.Sprintf("Share API error. Error code: %d", e.ErrCode)
}

View File

@@ -8,7 +8,42 @@ import (
"strings"
)
const UNIT_GB = 1024 * 1024 * 1024
type AuthType string
const (
UNIT_GB = 1024 * 1024 * 1024
UNIT_MB = 1024 * 1024
ProtocolSmb = "smb"
ProtocolIscsi = "iscsi"
ProtocolDefault = ProtocolIscsi
AuthTypeReadWrite AuthType = "rw"
AuthTypeReadOnly AuthType = "ro"
AuthTypeNoAccess AuthType = "no"
)
// SliceContains reports whether s occurs in items. A nil slice contains
// nothing.
func SliceContains(items []string, s string) bool {
	for idx := range items {
		if items[idx] == s {
			return true
		}
	}
	return false
}
// MBToBytes converts a size in mebibytes to bytes.
func MBToBytes(size int64) int64 {
	return size * UNIT_MB
}
// BytesToMB converts a size in bytes to mebibytes, truncating any remainder.
func BytesToMB(size int64) int64 {
	return size / UNIT_MB
}
// BytesToMBCeil converts a size in bytes to mebibytes, rounding any partial
// mebibyte up (ceiling division).
func BytesToMBCeil(size int64) int64 {
	return (size + UNIT_MB - 1) / UNIT_MB
}
func StringToBoolean(value string) bool {
value = strings.ToLower(value)
@@ -30,4 +65,4 @@ func LookupIPv4(name string) ([]string, error) {
}
return nil, fmt.Errorf("Failed to LookupIPv4 by local resolver for: %s", name)
}
}

View File

@@ -1,12 +1,39 @@
#!/bin/bash
plugin_name="csi.san.synology.com"
deploy_k8s_version="v1.19"
min_support_minor=19
max_support_minor=20
deploy_k8s_version="v1".$min_support_minor
SCRIPT_PATH="$(realpath "$0")"
SOURCE_PATH="$(realpath "$(dirname "${SCRIPT_PATH}")"/../)"
config_file="${SOURCE_PATH}/config/client-info.yml"
plugin_dir="/var/lib/kubelet/plugins/$plugin_name"
# parse_version detects the Kubernetes server version of the current cluster
# via kubectl and selects the manifest directory to deploy:
#   - supported minors (19, 20) deploy their exact version,
#   - older minors fall back to v1.$min_support_minor,
#   - newer minors cap at v1.$max_support_minor.
# Exits 1 for any non-1.x major version.
# NOTE(review): `kubectl version --short` is deprecated/removed in newer
# kubectl releases — confirm the supported kubectl range.
parse_version(){
	ver=$(kubectl version --short | grep Server | awk '{print $3}')
	major=$(echo "${ver##*v}" | cut -d'.' -f1)
	minor=$(echo "${ver##*v}" | cut -d'.' -f2)

	if [[ "$major" != 1 ]]; then
		echo "Version not supported: $ver"
		exit 1
	fi

	case "$minor" in
		19|20)
			# Exact match within the supported window.
			deploy_k8s_version="v1".$minor
			;;
		*)
			# Clamp out-of-window minors to the nearest supported version.
			if [[ $minor -lt $min_support_minor ]]; then
				deploy_k8s_version="v1".$min_support_minor
			else
				deploy_k8s_version="v1".$max_support_minor
			fi
			;;
	esac

	echo "Deploy Version: $deploy_k8s_version"
}
# 1. Build
csi_build(){
echo "==== Build synology-csi .... ===="
@@ -16,6 +43,7 @@ csi_build(){
# 2. Install
csi_install(){
echo "==== Creates namespace and secrets, then installs synology-csi ===="
parse_version
kubectl create ns synology-csi
kubectl create secret -n synology-csi generic client-info-secret --from-file="$config_file"

View File

@@ -7,6 +7,7 @@ import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/common"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
)
@@ -51,6 +52,30 @@ var cmdDsmLogin = &cobra.Command{
},
}
// LoginDsmForTest logs in to the first DSM listed in ./config/client-info.yml
// and returns the authenticated client. Intended for synocli manual testing
// only; the caller is responsible for calling Logout.
func LoginDsmForTest() (*webapi.DSM, error) {
	info, err := common.LoadConfig("./config/client-info.yml")
	if err != nil {
		return nil, fmt.Errorf("Failed to read config: %v", err)
	}

	if len(info.Clients) == 0 {
		return nil, fmt.Errorf("No client in client-info.yml")
	}

	// Always use the first configured client for testing.
	dsm := &webapi.DSM{
		Ip:       info.Clients[0].Host,
		Port:     info.Clients[0].Port,
		Username: info.Clients[0].Username,
		Password: info.Clients[0].Password,
		Https:    info.Clients[0].Https,
	}

	if err := dsm.Login(); err != nil {
		return nil, fmt.Errorf("Failed to login to DSM: [%s]. err: %v", dsm.Ip, err)
	}

	return dsm, nil
}
func init() {
cmdDsm.AddCommand(cmdDsmLogin)

View File

@@ -16,6 +16,7 @@ var rootCmd = &cobra.Command{
func init() {
rootCmd.AddCommand(cmdDsm)
rootCmd.AddCommand(cmdShare)
}
func Execute() {

580
synocli/cmd/share.go Normal file
View File

@@ -0,0 +1,580 @@
/*
* Copyright 2022 Synology Inc.
*/
package cmd
import (
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/SynologyOpenSource/synology-csi/pkg/dsm/webapi"
"github.com/SynologyOpenSource/synology-csi/pkg/utils"
"strconv"
"text/tabwriter"
)
// cmdShare is the parent "share" command grouping all share subcommands;
// invoking it without a subcommand just prints its help text.
var cmdShare = &cobra.Command{
Use: "share",
Short: "share API",
Long: `DSM share API`,
Run: func(cmd *cobra.Command, args []string) {
cmd.Help()
},
}
// cmdShareGet fetches a single share by name and dumps it with %#v.
var cmdShareGet = &cobra.Command{
	Use:   "get <name>",
	Short: "get share",
	Args:  cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		// Deferred call discards Logout's result, same as the original closure.
		defer dsm.Logout()

		name := args[0]
		share, err := dsm.ShareGet(name)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("Success, ShareGet(%s) = %#v\n", name, share)
	},
}
// cmdShareList prints every share on the DSM as a tab-aligned table.
var cmdShareList = &cobra.Command{
	Use:   "list",
	Short: "list shares",
	Args:  cobra.MinimumNArgs(0),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer func() {
			dsm.Logout()
		}()
		shares, err := dsm.ShareList()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
		fmt.Fprintf(tw, "%s\t%-32s\t%-10s\t%-64s\t%s\t%s\t%s\t%s\t%-10s\t%s\t%s\n",
			"id:", "Name:", "VolPath", "Desc:", "Quota(MB):", "ShareCow:", "RecycleBin:", "RBAdminOnly:", "Encryption:", "CanSnap:", "Uuid:")
		for i, info := range shares {
			fmt.Fprintf(tw, "%d\t%-32s\t", i, info.Name)
			fmt.Fprintf(tw, "%-10s\t", info.VolPath)
			fmt.Fprintf(tw, "%-64s\t", info.Desc)
			fmt.Fprintf(tw, "%d\t", info.QuotaValueInMB)
			fmt.Fprintf(tw, "%v\t", info.EnableShareCow)
			fmt.Fprintf(tw, "%v\t", info.EnableRecycleBin)
			fmt.Fprintf(tw, "%v\t", info.RecycleBinAdminOnly)
			fmt.Fprintf(tw, "%-10d\t", info.Encryption)
			fmt.Fprintf(tw, "%v\t", info.SupportSnapshot)
			fmt.Fprintf(tw, "%s\t", info.Uuid)
			fmt.Fprintf(tw, "\n")
		}
		// Flush once after all rows: flushing inside the loop resets
		// tabwriter's column-width state per row and misaligns the table.
		_ = tw.Flush()
		fmt.Printf("Success, ShareList()\n")
	},
}
// cmdShareCreate creates a new share at the given volume location with an
// optional quota given in bytes (rounded up to MB).
var cmdShareCreate = &cobra.Command{
	Use:   "create <name> <location> [<size_bytes>]",
	Short: "create share",
	Args:  cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer dsm.Logout()

		name, location := args[0], args[1]
		// Quota defaults to 0 (unlimited) when no size argument is given.
		var sizeBytes int64
		if len(args) >= 3 {
			if sizeBytes, err = strconv.ParseInt(args[2], 10, 64); err != nil {
				fmt.Println(err)
				os.Exit(1)
			}
		}
		sizeInMB := utils.BytesToMBCeil(sizeBytes)
		createSpec := webapi.ShareCreateSpec{
			Name: name,
			ShareInfo: webapi.ShareInfo{
				Name:                name,
				VolPath:             location,
				Desc:                "Created by synocli",
				EnableShareCow:      false,
				EnableRecycleBin:    true,
				RecycleBinAdminOnly: true,
				Encryption:          0,
				QuotaForCreate:      &sizeInMB,
			},
		}
		fmt.Printf("spec = %#v, sizeInMB = %d\n", createSpec, sizeInMB)
		if err = dsm.ShareCreate(createSpec); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		// Read the share back to show what was actually created.
		created, err := dsm.ShareGet(name)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("Success, ShareCreate(%s) resp = %#v\n", name, created)
	},
}
// cmdShareDelete removes the named share from the DSM.
var cmdShareDelete = &cobra.Command{
	Use:   "delete <name>",
	Short: "delete share",
	Args:  cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer dsm.Logout()

		name := args[0]
		if err = dsm.ShareDelete(name); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("Success, ShareDelete(%s)\n", name)
	},
}
// cmdShareClone clones an existing share into a new share.
//
// Usage: clone <new name> <src name> [<is_snapshot true|false>]
//   - is_snapshot absent or not "true": <src name> is the name of the
//     share to clone.
//   - is_snapshot "true": <src name> is a snapshot Time string; every
//     share's snapshot list is scanned to find the share that owns it,
//     and the clone is taken from that snapshot.
var cmdShareClone = &cobra.Command{
Use: "clone <new name> <src name> <is_snapshot [true|false]>",
Short: "clone share",
Args: cobra.MinimumNArgs(2),
Run: func(cmd *cobra.Command, args []string) {
dsm, err := LoginDsmForTest()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
defer func() {
dsm.Logout()
}()
// Optional third argument switches to snapshot-clone mode.
fromSnapshot := false
if len(args) >= 3 && args[2] == "true" {
fromSnapshot = true
}
newName := args[0]
srcName := args[1]
orgShareName := ""
snapshot := ""
if fromSnapshot {
snapshot = srcName
// Snapshots are identified by their Time field; walk every share's
// snapshot list until the owning share is found.
shares, err := dsm.ShareList()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, share := range shares {
snaps, err := dsm.ShareSnapshotList(share.Name)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
for _, snap := range snaps {
if snap.Time != srcName {
continue
}
orgShareName = share.Name
break
}
// Stop scanning as soon as the owning share is known.
if orgShareName != "" {
break
}
}
if orgShareName == "" {
fmt.Println("Failed to find org Share of the snapshot")
os.Exit(1)
}
} else {
orgShareName = srcName
}
// The clone inherits volume path and recycle-bin settings from the
// source share.
srcShare, err := dsm.ShareGet(orgShareName)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
shareSpec := webapi.ShareCloneSpec{
Name: newName,
Snapshot: snapshot,
ShareInfo: webapi.ShareInfo{
Name: newName,
VolPath: srcShare.VolPath,
Desc: "Cloned from "+srcName+" by synocli",
EnableRecycleBin: srcShare.EnableRecycleBin,
RecycleBinAdminOnly: srcShare.RecycleBinAdminOnly,
NameOrg: orgShareName,
},
}
fmt.Printf("newName: %s, fromSnapshot: %v (%s), orgShareName: %s\n", newName, fromSnapshot, snapshot, orgShareName)
_, err = dsm.ShareClone(shareSpec)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
// Read the new share back to show the result of the clone.
share, err := dsm.ShareGet(newName)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Success, ShareClone(%s, %s) new share = %#v\n", newName, srcName, share)
},
}
// cmdShareSnapshotCreate takes a snapshot of a share; only shares on
// btrfs volumes support snapshots.
var cmdShareSnapshotCreate = &cobra.Command{
	Use:   "snap_create <src name> <desc> <is_lock [true|false]>",
	Short: "create share snapshot (only share located in btrfs volume can take snapshots)",
	Args:  cobra.MinimumNArgs(3),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer dsm.Logout()

		createSpec := webapi.ShareSnapshotCreateSpec{
			ShareName: args[0],
			Desc:      args[1],
			IsLocked:  utils.StringToBoolean(args[2]),
		}
		fmt.Printf("spec = %#v\n", createSpec)

		snapTime, err := dsm.ShareSnapshotCreate(createSpec)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("resp = %s\n", snapTime)

		// Re-list the snapshots to show the freshly created entry, which is
		// identified by the returned time string.
		snaps, err := dsm.ShareSnapshotList(createSpec.ShareName)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		var created webapi.ShareSnapshotInfo
		for _, s := range snaps {
			if s.Time == snapTime {
				created = s
				break
			}
		}
		fmt.Printf("Success, ShareSnapshotCreate(%s), snap = %#v\n", args[0], created)
	},
}
// cmdShareSnapshotDelete removes one snapshot (identified by its time
// string) from a share.
var cmdShareSnapshotDelete = &cobra.Command{
	Use:   "snap_delete <share name> <snapshot time>",
	Short: "delete share snapshot",
	Args:  cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer dsm.Logout()

		shareName, snapTime := args[0], args[1]
		// Note the API takes (snapshot time, share name) in that order.
		if err := dsm.ShareSnapshotDelete(snapTime, shareName); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("Success, ShareSnapshotDelete(%s, %s)\n", snapTime, shareName)
	},
}
// shareSnapshotListAll returns the snapshots of the named share, or the
// snapshots of every share on the DSM when shareName is empty.
// Errors are returned, not printed: the caller reports them (previously
// they were both printed here and printed again by the caller).
func shareSnapshotListAll(dsm *webapi.DSM, shareName string) ([]webapi.ShareSnapshotInfo, error) {
	if shareName != "" {
		return dsm.ShareSnapshotList(shareName)
	}
	shares, err := dsm.ShareList()
	if err != nil {
		return nil, err
	}
	var infos []webapi.ShareSnapshotInfo
	for _, share := range shares {
		snaps, err := dsm.ShareSnapshotList(share.Name)
		if err != nil {
			return nil, err
		}
		infos = append(infos, snaps...)
	}
	return infos, nil
}
// cmdShareSnapshotList lists snapshots of one share, or of all shares
// when no share name is given, as a tab-aligned table.
var cmdShareSnapshotList = &cobra.Command{
	Use:   "snap_list [<share name>]",
	Short: "list share snapshots",
	Args:  cobra.MinimumNArgs(0),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer func() {
			dsm.Logout()
		}()
		shareName := ""
		if len(args) >= 1 {
			shareName = args[0]
		}
		snaps, err := shareSnapshotListAll(dsm, shareName)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
		fmt.Fprintf(tw, "id:\tTime:\tDesc:\tLock:\tScheduleSnapshot:\n")
		for i, info := range snaps {
			fmt.Fprintf(tw, "%d\t%-32s\t", i, info.Time)
			fmt.Fprintf(tw, "%-32s\t", info.Desc)
			fmt.Fprintf(tw, "%v\t", info.Lock)
			fmt.Fprintf(tw, "%v\t", info.ScheduleSnapshot)
			fmt.Fprintf(tw, "\n")
		}
		// Flush once after all rows: flushing inside the loop resets
		// tabwriter's column-width state per row and misaligns the table.
		_ = tw.Flush()
		fmt.Printf("Success, ShareSnapList(%s)\n", shareName)
	},
}
// createSharePermission builds a SharePermission for the given user name
// from an ACL keyword: "rw" (writable), "ro" (read-only) or "no" (deny).
// Any other keyword prints an error and yields nil.
func createSharePermission(name string, acl string) *webapi.SharePermission {
	p := &webapi.SharePermission{Name: name}
	switch strings.ToLower(acl) {
	case "rw":
		p.IsWritable = true
	case "ro":
		p.IsReadonly = true
	case "no":
		p.IsDeny = true
	default:
		fmt.Println("[ERROR] Unknown ACL")
		return nil
	}
	return p
}
// getShareLocalUserPermission returns the "local_user" permission entry of
// userName on shareName, or an error when the user has no entry.
func getShareLocalUserPermission(dsm *webapi.DSM, shareName string, userName string) (*webapi.SharePermission, error) {
	infos, err := dsm.SharePermissionList(shareName, "local_user")
	if err != nil {
		return nil, err
	}
	// Index the slice instead of taking the address of a range variable, so
	// the returned pointer refers to the slice element on every Go version.
	for i := range infos {
		if infos[i].Name == userName {
			return &infos[i], nil
		}
	}
	// Lowercase, unpunctuated error text per Go convention.
	return nil, fmt.Errorf("permission not found")
}
// cmdSharePermissionList prints the permission entries of a share for one
// user/group type (default "local_user") as a tab-aligned table.
var cmdSharePermissionList = &cobra.Command{
	Use:   "permission_list <name> [local_user|local_group|system]",
	Short: "list permissions",
	Args:  cobra.MinimumNArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer func() {
			dsm.Logout()
		}()
		userGroupType := "local_user"
		if len(args) >= 2 {
			userGroupType = args[1]
		}
		infos, err := dsm.SharePermissionList(args[0], userGroupType)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		tw := tabwriter.NewWriter(os.Stdout, 8, 0, 2, ' ', 0)
		fmt.Fprintf(tw, "%s\t%-20s\t%-10s\t%-12s\t%-12s\t%-10s\t%-10s\n", "id:", "Name:", "IsAdmin", "IsReadonly:", "IsWritable:", "IsDeny:", "IsCustom:")
		for i, info := range infos {
			fmt.Fprintf(tw, "%d\t", i)
			fmt.Fprintf(tw, "%-20s\t", info.Name)
			fmt.Fprintf(tw, "%-10v\t", info.IsAdmin)
			fmt.Fprintf(tw, "%-12v\t", info.IsReadonly)
			fmt.Fprintf(tw, "%-12v\t", info.IsWritable)
			fmt.Fprintf(tw, "%-10v\t", info.IsDeny)
			fmt.Fprintf(tw, "%-10v\t", info.IsCustom)
			fmt.Fprintf(tw, "\n")
		}
		// Flush once after all rows so column widths are computed over the
		// whole table (per-row flushing breaks the alignment).
		_ = tw.Flush()
		fmt.Printf("Success, SharePermissionList(%s)\n", args[0])
	},
}
// cmdSharePermissionSet sets a local_user permission (rw/ro/no) for one
// user on a share and reads the entry back to confirm the change.
var cmdSharePermissionSet = &cobra.Command{
	Use:   "permission_set <share name> <username> <[rw|ro|no]>",
	Short: "set permission",
	Args:  cobra.MinimumNArgs(3),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer dsm.Logout()

		shareName, userName := args[0], args[1]
		perm := createSharePermission(userName, args[2])
		if perm == nil {
			fmt.Println("Failed. Invalid Argument.")
			os.Exit(1)
		}
		setSpec := webapi.SharePermissionSetSpec{
			Name:          shareName,
			UserGroupType: "local_user",
			Permissions:   []*webapi.SharePermission{perm},
		}
		fmt.Printf("spec = %#v\n", setSpec)
		if err := dsm.SharePermissionSet(setSpec); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		// Verify by reading the permission entry back from the DSM.
		newPermission, err := getShareLocalUserPermission(dsm, shareName, userName)
		if err != nil {
			fmt.Printf("Failed to get share local_user permission(%s, %s): %v\n", shareName, userName, err)
			os.Exit(1)
		}
		fmt.Printf("Success, SharePermissionSet(%s), newPermission: %#v\n", shareName, newPermission)
	},
}
// cmdShareSet resizes a share's quota to <new size> bytes (rounded up to
// MB); only shares on btrfs volumes can be resized.
var cmdShareSet = &cobra.Command{
	Use:   "set <share name> <new size>",
	Short: "share set (only share located in btrfs volume can be resized)",
	Args:  cobra.MinimumNArgs(2),
	Run: func(cmd *cobra.Command, args []string) {
		dsm, err := LoginDsmForTest()
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		defer dsm.Logout()

		name := args[0]
		sizeBytes, err := strconv.ParseInt(args[1], 10, 64)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		before, err := dsm.ShareGet(name)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("old = %#v\n", before)

		quotaMB := utils.BytesToMBCeil(sizeBytes)
		update := webapi.ShareUpdateInfo{
			Name:           before.Name,
			VolPath:        before.VolPath,
			QuotaForCreate: &quotaMB,
		}
		if err := dsm.ShareSet(name, update); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		// Re-read to show the quota change took effect.
		after, err := dsm.ShareGet(name)
		if err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		fmt.Printf("new = %#v\n", after)
		fmt.Printf("Success, ShareSet(%s), quota: %v -> %v MB\n", name, before.QuotaValueInMB, after.QuotaValueInMB)
	},
}
func init() {
cmdShare.AddCommand(cmdShareGet)
cmdShare.AddCommand(cmdShareCreate)
cmdShare.AddCommand(cmdShareDelete)
cmdShare.AddCommand(cmdShareList)
cmdShare.AddCommand(cmdShareClone)
cmdShare.AddCommand(cmdShareSnapshotCreate)
cmdShare.AddCommand(cmdShareSnapshotDelete)
cmdShare.AddCommand(cmdShareSnapshotList)
cmdShare.AddCommand(cmdSharePermissionList)
cmdShare.AddCommand(cmdSharePermissionSet)
cmdShare.AddCommand(cmdShareSet)
}

View File

@@ -0,0 +1,12 @@
# CreateVolumeSecret:
# DeleteVolumeSecret:
# ControllerPublishVolumeSecret:
# ControllerUnpublishVolumeSecret:
# ControllerValidateVolumeCapabilitiesSecret:
NodeStageVolumeSecret:
username: "username"
password: "password"
# NodePublishVolumeSecret:
# CreateSnapshotSecret:
# DeleteSnapshotSecret:
# ControllerExpandVolumeSecret:

View File

@@ -15,6 +15,7 @@ import (
const (
ConfigPath = "./../../config/client-info.yml"
SecretsFilePath = "./sanity-test-secret-file.yaml"
)
func TestSanity(t *testing.T) {
@@ -69,6 +70,15 @@ func TestSanity(t *testing.T) {
testConfig.TargetPath = targetPath
testConfig.StagingPath = stagingPath
testConfig.Address = endpoint
testConfig.SecretsFile = SecretsFilePath
// Set Input parameters for test
testConfig.TestVolumeParameters = map[string]string{
"protocol": "smb",
}
// testConfig.TestVolumeAccessType = "block" // raw block
// Run test
sanity.Test(t, testConfig)
}