pi-argo-workflows.yaml
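# Argo Workflow that runs a sleep step and then launches the Spark Operator
# "pyspark-pi" example as a SparkApplication, waiting for it to complete.
# A typical way to run it (assuming the Argo CLI, Argo Workflows, and the
# Spark Operator are installed) is: argo submit -n default pi-argo-workflows.yaml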
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  name: spark-operator
  namespace: default
spec:
  arguments: {}
  entrypoint: demo-workflow
  serviceAccountName: argo-workflows
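  # Assumes the argo-workflows service account has RBAC permission to create
  # and read SparkApplication resources; adjust to your cluster's setup.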
  templates:
    - name: demo-workflow
      steps:
        - - name: sleep
            template: sleep
        - - name: spark-operator
            template: sparkapp
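    # Step 1: placeholder container that sleeps for 60 seconds.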
    - name: sleep
      container:
        image: docker/whalesay
        command: [ sleep ]
        args: [ "60" ]
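    # Step 2: resource template that creates the SparkApplication below and
    # waits until its status matches the success or failure condition.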
    - name: sparkapp
      resource:
        action: create
        setOwnerReference: true
        successCondition: status.applicationState.state == COMPLETED
        failureCondition: status.applicationState.state in (FAILED, ERROR)
        manifest: |
apiVersion: "sparkoperator.k8s.io/v1beta2"
kind: SparkApplication
metadata:
generateName: pyspark-pi-
namespace: default
spec:
type: Python
pythonVersion: "3"
mode: cluster
image: "public.ecr.aws/r1l5w1y9/spark-operator:3.2.1-hadoop-3.3.1-java-11-scala-2.12-python-3.8-latest"
mainApplicationFile: "local:///opt/spark/examples/src/main/python/pi.py"
sparkVersion: "3.1.1"
restartPolicy:
type: OnFailure
onFailureRetries: 1
onFailureRetryInterval: 10
onSubmissionFailureRetries: 1
onSubmissionFailureRetryInterval: 20
driver:
cores: 1
coreLimit: "1200m"
memory: "512m"
labels:
version: 3.1.1
serviceAccount: spark
executor:
cores: 1
instances: 2
memory: "512m"
serviceAccount: spark
labels:
version: 3.1.1