Skip to content

Commit 01e8562

Browse files
committed
chore(addons): add spark seaweedfs plans
1 parent da2a4ff commit 01e8562

9 files changed

Lines changed: 240 additions & 2 deletions

File tree

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
credential:
2+
{{- if (eq .Values.filer.service.type "LoadBalancer") }}
3+
- name: EXTERNAL_S3_HOST
4+
valueFrom:
5+
serviceRef:
6+
name: {{ include "seaweedfs.filer.fullname" . }}
7+
jsonpath: '{ .status.loadBalancer.ingress[*].ip }'
8+
{{- end }}
9+
10+
- name: S3_HOST
11+
valueFrom:
12+
serviceRef:
13+
name: {{ include "seaweedfs.filer.fullname" . }}
14+
jsonpath: '{ .spec.clusterIP }'
15+
16+
- name: S3_PORT
17+
valueFrom:
18+
serviceRef:
19+
name: {{ include "seaweedfs.filer.fullname" . }}
20+
jsonpath: '{.spec.ports[?(@.name=="s3")].port}'
21+
22+
- name: S3_CONFIG
23+
valueFrom:
24+
configMapRef:
25+
name: {{ include "seaweedfs.configuration.configMap" . }}
26+
jsonpath: '{.data.s3\.json}'
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
{
2+
"$schema": "http://json-schema.org/draft-04/schema#",
3+
"type": "object",
4+
"properties": {
5+
"imagePullPolicy": {
6+
"type": "string",
7+
"enum": ["Always", "IfNotPresent", "Never"],
8+
"default": "IfNotPresent",
9+
"title": "Image pull policy"
10+
}
11+
}
12+
}
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
name: "standard-v4s2048"
2+
id: 62127217-09e3-4e9b-8464-1a0dffab8803
3+
description: "SeaweedFS standard-v4s2048 plan, which limits storage to 4 volumes, each of size 2048G."
4+
displayName: "standard-v4s2048"
5+
bindable: true
6+
maximum_polling_duration: 1800
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
fullnameOverride: hb-seaweedfs-standard-v4s2048
2+
3+
master:
4+
resources:
5+
limits:
6+
cpu: 3000m
7+
memory: 10Gi
8+
requests:
9+
cpu: 200m
10+
memory: 5Gi
11+
persistence:
12+
enabled: true
13+
size: 2Gi
14+
storageClass: ""
15+
16+
filer:
17+
resources:
18+
limits:
19+
cpu: 3000m
20+
memory: 10Gi
21+
requests:
22+
cpu: 300m
23+
memory: 5Gi
24+
persistence:
25+
enabled: true
26+
size: 128Gi
27+
storageClass: ""
28+
29+
volume:
30+
replicas: 4
31+
resources:
32+
limits:
33+
cpu: 3000m
34+
memory: 10Gi
35+
requests:
36+
cpu: 300m
37+
memory: 5Gi
38+
persistence:
39+
meta:
40+
enabled: true
41+
size: 2Gi
42+
storageClass: ""
43+
data:
44+
enabled: true
45+
size: 2048Gi
46+
storageClass: ""
47+
compactionMBps: 50
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
credential:
2+
{{- if (eq .Values.service.type "LoadBalancer") }}
3+
- name: EXTERNAL_MASTER_HOST
4+
valueFrom:
5+
serviceRef:
6+
name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }}
7+
jsonpath: '{ .status.loadBalancer.ingress[*].ip }'
8+
{{- end }}
9+
10+
- name: MASTER_HOST
11+
valueFrom:
12+
serviceRef:
13+
name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }}
14+
jsonpath: '{ .spec.clusterIP }'
15+
16+
- name: MASTER_PORT
17+
valueFrom:
18+
serviceRef:
19+
name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }}
20+
jsonpath: '{ .spec.ports[?(@.name=="cluster")].port }'
21+
22+
{{- if .Values.security.ssl.enabled }}
23+
- name: HTTPS_PORT
24+
valueFrom:
25+
serviceRef:
26+
name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }}
27+
jsonpath: '{ .spec.ports[?(@.name=="https")].port }'
28+
{{- else }}
29+
- name: HTTP_PORT
30+
valueFrom:
31+
serviceRef:
32+
name: {{ printf "%s-master-svc" (include "common.names.fullname" .) }}
33+
jsonpath: '{ .spec.ports[?(@.name=="http")].port }'
34+
{{- end }}
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
{
2+
"$schema": "http://json-schema.org/draft-04/schema#",
3+
"type": "object",
4+
"properties": {
5+
"imagePullPolicy": {
6+
"type": "string",
7+
"enum": ["Always", "IfNotPresent", "Never"],
8+
"default": "IfNotPresent",
9+
"title": "Image pull policy"
10+
}
11+
}
12+
}
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
name: "standard-20c40g4w"
2+
id: e820a7a1-ba61-403a-987e-b85dd8d57297
3+
description: "Spark standard-20c40g4w plan, which limits resources to 20 cores, 40G memory, and 4 workers."
4+
displayName: "standard-20c40g4w"
5+
bindable: true
6+
maximum_polling_duration: 1800
Lines changed: 95 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,95 @@
1+
## @param fullnameOverride String to fully override common.names.fullname template
2+
##
3+
fullnameOverride: "hb-spark-standard-20c40g4w"
4+
5+
## Spark master specific configuration
6+
##
7+
master:
8+
## @param master.daemonMemoryLimit Set the memory limit for the master daemon
9+
##
10+
daemonMemoryLimit: ""
11+
## @param master.configOptions Use a string to set the config options in the form "-Dx=y"
12+
##
13+
configOptions: ""
14+
## @param master.extraEnvVars Extra environment variables to pass to the master container
15+
## For example:
16+
## extraEnvVars:
17+
## - name: SPARK_DAEMON_JAVA_OPTS
18+
## value: -Dx=y
19+
##
20+
## Container resource requests and limits
21+
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
22+
## We usually recommend not to specify default resources and to leave this as a conscious
23+
## choice for the user. This also increases chances charts run on environments with little
24+
## resources, such as Minikube. If you do want to specify resources, uncomment the following
25+
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
26+
## @param master.resources.limits The resources limits for the container
27+
## @param master.resources.requests The requested resources for the container
28+
##
29+
resources:
30+
limits:
31+
cpu: 20000m
32+
memory: 40Gi
33+
requests:
34+
cpu: 4000m
35+
memory: 20Gi
36+
## Enable persistence using Persistent Volume Claims
37+
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
38+
##
39+
persistence:
40+
enabled: true
41+
accessModes:
42+
- ReadWriteOnce
43+
size: 2Gi
44+
## @section Spark worker parameters
45+
##
46+
47+
## Spark worker specific configuration
48+
##
49+
worker:
50+
## @param worker.daemonMemoryLimit Set the memory limit for the worker daemon
51+
##
52+
daemonMemoryLimit: ""
53+
## @param worker.memoryLimit Set the maximum memory the worker is allowed to use
54+
##
55+
memoryLimit: ""
56+
## @param worker.coreLimit Set the maximum number of cores that the worker can use
57+
##
58+
coreLimit: ""
59+
javaOptions: ""
60+
## @param worker.configOptions Set extra options to configure the worker in the form `-Dx=y`
61+
##
62+
configOptions: ""
63+
## @param worker.extraEnvVars An array to add extra env vars
64+
## For example:
65+
## extraEnvVars:
66+
## - name: SPARK_DAEMON_JAVA_OPTS
67+
## value: -Dx=y
68+
69+
## @param worker.replicaCount Number of spark workers (will be the minimum number when autoscaling is enabled)
70+
##
71+
replicaCount: 4
72+
## Container resource requests and limits
73+
## ref: https://kubernetes.io/docs/user-guide/compute-resources/
74+
## We usually recommend not to specify default resources and to leave this as a conscious
75+
## choice for the user. This also increases chances charts run on environments with little
76+
## resources, such as Minikube. If you do want to specify resources, uncomment the following
77+
## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
78+
## @param worker.resources.limits The resources limits for the container
79+
## @param worker.resources.requests The requested resources for the container
80+
##
81+
resources:
82+
limits:
83+
cpu: 20000m
84+
memory: 40Gi
85+
requests:
86+
cpu: 4000m
87+
memory: 20Gi
88+
## Enable persistence using Persistent Volume Claims
89+
## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
90+
##
91+
persistence:
92+
enabled: true
93+
accessModes:
94+
- ReadWriteOnce
95+
size: 2Gi

addons/spark/3.4/plans/standard-24c48g5w/values.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ master:
4040
enabled: true
4141
accessModes:
4242
- ReadWriteOnce
43-
size: 1Gi
43+
size: 2Gi
4444
## @section Spark worker parameters
4545
##
4646

@@ -92,4 +92,4 @@ worker:
9292
enabled: true
9393
accessModes:
9494
- ReadWriteOnce
95-
size: 1Gi
95+
size: 2Gi

0 commit comments

Comments
 (0)