Describe the bug
The YAML compiler rejects an input that declares multiple lightweight_components entries of the same type (here, five spark_hadoop_worker components that differ only in execution_id and deploy target).

Input:
global_variables:
  - &spark_hadoop_master_ip_address 188.184.95.231
  - &spark_hadoop_master_fqdn spark-master.cern.ch
  - &spark_hadoop_worker_ip_address 188.184.104.92
  - &spark_hadoop_worker_fqdn spark-worker.cern.ch
  - &spark_hadoop_add_worker_1_ip_address 188.184.31.131
  - &spark_hadoop_add_worker_1_fqdn spark-add-worker-1.cern.ch
  - &spark_hadoop_add_worker_2_ip_address 188.184.30.108
  - &spark_hadoop_add_worker_2_fqdn spark-add-worker-2.cern.ch
  - &spark_hadoop_add_worker_3_ip_address 188.184.28.84
  - &spark_hadoop_add_worker_3_fqdn spark-add-worker-3.cern.ch
  - &spark_hadoop_add_worker_4_ip_address 188.185.82.126
  - &spark_hadoop_add_worker_4_fqdn spark-submit.cern.ch
spark_worker_runtime_variables:
  - &spark_hadoop_worker_runtime_var_spark_hadoop_master_fqdn
    __from__: *spark_hadoop_master_fqdn
preferred_tech_stack:
  level_1_configuration: puppet
  level_2_configuration: sh
  container_orchestration: docker-swarm
  container: docker
site_infrastructure:
  - fqdn: *spark_hadoop_master_fqdn
    ip_address: *spark_hadoop_master_ip_address
  - fqdn: *spark_hadoop_worker_fqdn
    ip_address: *spark_hadoop_worker_ip_address
  - fqdn: *spark_hadoop_add_worker_1_fqdn
    ip_address: *spark_hadoop_add_worker_1_ip_address
  - fqdn: *spark_hadoop_add_worker_2_fqdn
    ip_address: *spark_hadoop_add_worker_2_ip_address
  - fqdn: *spark_hadoop_add_worker_3_fqdn
    ip_address: *spark_hadoop_add_worker_3_ip_address
  - fqdn: *spark_hadoop_add_worker_4_fqdn
    ip_address: *spark_hadoop_add_worker_4_ip_address
lightweight_components:
  - name: spark-hadoop-master
    type: spark_hadoop_master
    repository_url: "https://github.com/maany/simple_spark_cluster_master"
    repository_revision: "master"
    execution_id: 5
    deploy:
      - node: *spark_hadoop_master_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_driver_memory: "1g"
      spark_executor_memory: "1g"
      spark_yarn_am_memory: "512m"
      spark_history_fs_log_directory: "hdfs://spark-master.cern.ch:9000/spark-logs"
      spark_history_fs_update_interval: "30s"
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 0
    deploy:
      - node: *spark_hadoop_worker_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *spark_hadoop_master_fqdn
      spark_driver_memory: "1g"
      spark_executor_memory: "1g"
      spark_yarn_am_memory: "512m"
      spark_history_fs_log_directory: "hdfs://spark-master.cern.ch:9000/spark-logs"
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 1
    deploy:
      - node: *spark_hadoop_add_worker_1_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *spark_hadoop_master_fqdn
      spark_driver_memory: "1g"
      spark_executor_memory: "1g"
      spark_yarn_am_memory: "512m"
      spark_history_fs_log_directory: "hdfs://spark-master.cern.ch:9000/spark-logs"
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 2
    deploy:
      - node: *spark_hadoop_add_worker_2_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *spark_hadoop_master_fqdn
      spark_driver_memory: "1g"
      spark_executor_memory: "1g"
      spark_yarn_am_memory: "512m"
      spark_history_fs_log_directory: "hdfs://spark-master.cern.ch:9000/spark-logs"
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 3
    deploy:
      - node: *spark_hadoop_add_worker_3_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *spark_hadoop_master_fqdn
      spark_driver_memory: "1g"
      spark_executor_memory: "1g"
      spark_yarn_am_memory: "512m"
      spark_history_fs_log_directory: "hdfs://spark-master.cern.ch:9000/spark-logs"
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 4
    deploy:
      - node: *spark_hadoop_add_worker_4_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *spark_hadoop_master_fqdn
      spark_driver_memory: "1g"
      spark_executor_memory: "1g"
      spark_yarn_am_memory: "512m"
      spark_history_fs_log_directory: "hdfs://spark-master.cern.ch:9000/spark-logs"
What output/error did you get?
Compilation fails with an error about duplicate meta_info_{worker} entries in the generated runtime.yaml.
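The exact layout of runtime.yaml is not shown in this report, but the error suggests the compiler derives the meta_info key from the component type alone, so every same-type component collapses onto the same key. A hypothetical fragment of the generated file (key names and structure are illustrative, not the compiler's verbatim output):

meta_info_spark_hadoop_worker:   # generated for the worker with execution_id 0
  ...
meta_info_spark_hadoop_worker:   # generated again for execution_id 1 -- duplicate key, compilation fails
  ...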
To Reproduce
Run the compiler with the full input above, or with the minimal sketch below.
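A trimmed input should hit the same collision. This is a sketch, not a verified reproducer: it assumes the compiler accepts an input with only the sections shown, and the short anchor names (master_fqdn, worker_a_fqdn, worker_b_fqdn) are placeholders of mine rather than names from the original input.

global_variables:
  - &master_fqdn spark-master.cern.ch
  - &worker_a_fqdn spark-worker.cern.ch
  - &worker_b_fqdn spark-add-worker-1.cern.ch
lightweight_components:
  # two components of the same type -- only execution_id and deploy node differ
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 0
    deploy:
      - node: *worker_a_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *master_fqdn
  - name: spark-hadoop-worker
    type: spark_hadoop_worker
    repository_url: "https://github.com/maany/simple_spark_cluster_worker"
    repository_revision: "master"
    execution_id: 1
    deploy:
      - node: *worker_b_fqdn
        container_count: 1
    config:
      enable_init_daemon: false
      spark_master: *master_fqdn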
Expected behavior
It should be possible to declare multiple lightweight_components entries of the same type. This is required whenever same-type components need different config or deploy sections, as in the input above.
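One plausible way to satisfy this, assuming the generated key is currently derived from the component type alone, would be to make it unique per component, for example by appending the execution_id. Illustrative output only; the real runtime.yaml layout and naming scheme may differ:

meta_info_spark_hadoop_worker_0:   # worker with execution_id 0
  ...
meta_info_spark_hadoop_worker_1:   # worker with execution_id 1 -- no longer collides
  ...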