k8s MinIO Cluster Migration
# 1. Scenario
The company's k8s cluster has to be migrated, and MinIO is deployed inside the cluster (company constraints rule out using OSS). During the migration the MinIO data should stay as consistent as possible; the official documentation describes doing this with the mc command.
- Old: minio version RELEASE.2021-04-22T15-44-28Z
- New: minio version RELEASE.2023-08-09T23-30-22Z
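Before starting, it can help to confirm which server version each side is actually running. A minimal sketch, assuming both clusters run MinIO as a StatefulSet named `minio` in the `default` namespace (adjust names and kubeconfig contexts to your environment):

```bash
# Read the MinIO image tag from the StatefulSet on each cluster
# (run once against the old cluster's context, once against the new one)
kubectl get statefulset minio -n default \
  -o jsonpath='{.spec.template.spec.containers[0].image}{"\n"}'
```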
# 2. Deployment
# 2.1 Deploy the MinIO cluster
[root@prod-manage minio-cluster]# cat minio-cluster.yaml
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minio
spec:
  serviceName: "minio-headless"
  replicas: 4
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: "app"
                operator: In
                values:
                - minio
            topologyKey: "kubernetes.io/hostname"
      containers:
      - name: minio
        env:
        - name: MINIO_ROOT_USER
          value: "admin"
        - name: MINIO_ROOT_PASSWORD
          value: "minioadmin"
        - name: MINIO_ERASURE_CODING
          value: "on"
        - name: MINIO_DATA_SHARDS
          value: "2"
        - name: MINIO_PARITY_SHARDS
          value: "2"
        image: minio/minio:RELEASE.2023-08-09T23-30-22Z
        imagePullPolicy: IfNotPresent
        command:
        - /bin/sh
        - -c
        - minio server --console-address ":5000" http://minio-{0...3}.minio-headless.default.svc.cluster.local:9000/data
        ports:
        - name: data
          containerPort: 9000
          protocol: "TCP"
        - name: console
          containerPort: 5000
          protocol: "TCP"
        volumeMounts:
        - name: minio-data
          mountPath: /data
        - name: time-mount
          mountPath: /etc/localtime
      volumes:
      - name: time-mount
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: minio-data
    spec:
      storageClassName: "minio-cluster"
      accessModes:
      - ReadWriteOnce
      resources:
        requests:
          storage: 2Ti
---
apiVersion: v1
kind: Service
metadata:
  name: minio-headless
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
  - port: 9000
    name: data
  - port: 5000
    name: console
  selector:
    app: minio
---
apiVersion: v1
kind: Service
metadata:
  name: minio-service
spec:
  type: NodePort
  ports:
  - name: data
    nodePort: 31900
    port: 9000
    targetPort: 9000
    protocol: TCP
  - name: console
    nodePort: 31901
    port: 5000
    targetPort: 5000
    protocol: TCP
  selector:
    app: minio
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  labels:
    ingress-controller: nginx
  name: minio
spec:
  ingressClassName: nginx
  rules:
  - host: prod.minio.demo.com
    http:
      paths:
      - backend:
          service:
            name: minio-service
            port:
              number: 5000
        path: /
        pathType: Prefix
```
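After applying the manifest, a quick sanity check is worthwhile. A sketch using standard kubectl commands; resource names match the manifest above, adjust the namespace if you do not deploy into `default`:

```bash
kubectl apply -f minio-cluster.yaml
# Wait for all 4 replicas to become ready
kubectl rollout status statefulset/minio
# Pods should be spread across nodes by the podAntiAffinity rule
kubectl get pods -l app=minio -o wide
# One 2Ti PVC per replica, created from the volumeClaimTemplate
kubectl get pvc | grep minio-data
```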
# 2.2 Deploy mc
```bash
[root@prod-manage minio-cluster]# docker run -it --entrypoint=/bin/sh minio/mc
```
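Alternatively, and this is only a sketch rather than what the original setup used, mc can be run as a throwaway pod inside the new cluster, which lets it reach the headless service directly:

```bash
# Hypothetical alternative: interactive mc pod inside the new cluster
kubectl run mc-client --rm -it --restart=Never --image=minio/mc --command -- /bin/sh
```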
# 3. Test the migration
# 3.1 Set aliases for minio2021 and minio2023 (see the mc alias reference)
```bash
mc alias set old http://172.19.10.2:48289 admin minioadmin
mc alias set new http://172.19.10.238:31900 admin minioadmin
```
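To confirm both aliases resolve and the servers are reachable before mirroring, something like the following can be used (standard mc subcommands, shown as a sketch):

```bash
# List configured aliases
mc alias list
# Show server status, version and drive layout for each side
mc admin info old
mc admin info new
```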
# 3.2 mc data migration typically falls into one of three scenarios:
```bash
## 1. Full migration; files with the same name are not overwritten; buckets that do not exist are created automatically
mc mirror old new
## 2. Migrate a single bucket (test in this example); the target bucket must be created in advance
mc mirror old/test new/test   # test must already exist in minio2023
## 3. Add --overwrite to overwrite files with the same name
mc mirror --overwrite old new
mc mirror --overwrite old/test new/test
```
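Since the goal is to keep the data as consistent as possible, it is worth comparing the two sides after a mirror run. A minimal sketch using standard mc subcommands (the `test` bucket is the same example as above):

```bash
# List objects that differ between the two buckets
mc diff old/test new/test
# Compare total usage on each side
mc du old/test
mc du new/test
```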
# 3.3 Data too large to migrate in one short window: run the mirror in the background with continuous sync
[root@prod-manage ~]# cat job.yaml
```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: mc-setup-and-mirror
spec:
  template:
    metadata:
      labels:
        app: mc-setup-and-mirror
    spec:
      containers:
      - name: mc-setup
        image: minio/mc:latest
        command: ["sh", "-c", "mc alias set old http://172.19.10.2:48289 admin minioadmin && mc alias set new http://172.19.10.238:31900 admin minioadmin && mc mirror --watch old new"]
      restartPolicy: OnFailure
```
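After applying the Job, its progress can be followed with standard kubectl commands (a sketch; the label matches the Job template above):

```bash
kubectl apply -f job.yaml
# The Job keeps running for as long as `mc mirror --watch` is syncing
kubectl get job mc-setup-and-mirror
kubectl get pods -l app=mc-setup-and-mirror
```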
# 3.4 Watch the sync progress in the logs
```bash
[root@prod-manage ~]# kubectl logs -f mc-setup-and-mirror-c5w4m
...
`old/weather-analysis/cldas@r3857c/202403032200/TT2/UNKNOWN/105_37_7.32753.png` -> `new/weather-analysis/cldas@r3857c/202403032200/TT2/UNKNOWN/105_37_7.32753.png`
```
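Once applications have been switched over and the old cluster no longer receives writes, the watch Job can be removed and a final comparison run. A sketch; `weather-analysis` is simply the bucket visible in the log above, repeat for each bucket:

```bash
kubectl delete job mc-setup-and-mirror
# Final check that the bucket contents match
mc diff old/weather-analysis new/weather-analysis
```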