From 51bbb6362990492d0546c6d8255c32becacf9d7e Mon Sep 17 00:00:00 2001 From: Janarthanan Selvaraj Date: Mon, 30 Jul 2018 11:39:07 +0530 Subject: [PATCH] PNDA-4780 CDH: orchestrate-pnda-knox fails on centos in openstack --- pillar/hadoop/service_CDH.sls | 37 ++++++++++++++++++++++++ pillar/hadoop/service_HDP.sls | 31 ++++++++++++++++++++ pillar/top.sls | 1 + salt/_modules/pnda.py | 17 ++++++----- salt/cdh/hue-login.sls | 2 +- salt/cdh/templates/cfg_bmstandard.py.tpl | 26 ++++++++--------- salt/cdh/templates/cfg_pico.py.tpl | 26 ++++++++--------- salt/cdh/templates/cfg_production.py.tpl | 26 ++++++++--------- salt/cdh/templates/cfg_standard.py.tpl | 26 ++++++++--------- salt/hdp/oozie_libs.sls | 4 +-- salt/knox/init.sls | 27 +++++++++++------ salt/knox/templates/pnda.xml.tpl | 20 ++++++++----- salt/platform-testing/cdh.sls | 3 +- salt/pnda_opentsdb/conf.sls | 10 +------ 14 files changed, 166 insertions(+), 90 deletions(-) create mode 100644 pillar/hadoop/service_CDH.sls create mode 100644 pillar/hadoop/service_HDP.sls diff --git a/pillar/hadoop/service_CDH.sls b/pillar/hadoop/service_CDH.sls new file mode 100644 index 000000000..a258e6651 --- /dev/null +++ b/pillar/hadoop/service_CDH.sls @@ -0,0 +1,37 @@ +hadoop_services: + hbase_master: + service: hbase01 + component: MASTER + hdfs_namenode: + service: hdfs01 + component: NAMENODE + hive_server: + service: hive01 + component: HIVESERVER2 + port: 10000 + hue_server: + service: hue01 + component: HUE_SERVER + impala_catalog_server: + service: impala01 + component: IMPALAD + oozie_server: + service: oozie01 + component: OOZIE_SERVER + spark_job_histroy_server: + service: spark_on_yarn + component: SPARK_YARN_HISTORY_SERVER + port: 18088 + spark2_job_histroy_server: + service: spark_on_yarn + component: SPARK_YARN_HISTORY_SERVER + port: 18088 + yarn_resource_manager: + service: yarn01 + component: RESOURCEMANAGER + yarn_job_histroy_server: + service: yarn01 + component: JOBHISTORY + zookeeper_server: + service: zk01 + 
component: SERVER diff --git a/pillar/hadoop/service_HDP.sls b/pillar/hadoop/service_HDP.sls new file mode 100644 index 000000000..978cf1140 --- /dev/null +++ b/pillar/hadoop/service_HDP.sls @@ -0,0 +1,31 @@ +hadoop_services: + hbase_master: + service: HBASE + component: HBASE_MASTER + hdfs_namenode: + service: HDFS + component: NAMENODE + hive_server: + service: HIVE + component: HIVE_SERVER + port: 10001 + oozie_server: + service: OOZIE + component: OOZIE_SERVER + spark_job_histroy_server: + service: SPARK + component: SPARK_JOBHISTORYSERVER + port: 18080 + spark2_job_histroy_server: + service: SPARK2 + component: SPARK2_JOBHISTORYSERVER + port: 18081 + yarn_resource_manager: + service: YARN + component: RESOURCEMANAGER + yarn_job_histroy_server: + service: MAPREDUCE2 + component: HISTORYSERVER + zookeeper_server: + service: ZOOKEEPER + component: ZOOKEEPER_SERVER diff --git a/pillar/top.sls b/pillar/top.sls index c8ecaaf04..2a7b1bfa2 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -7,6 +7,7 @@ - env_parameters - packages.{{ grains['os'] }} - hadoop.{{ salt['grains.get']('hadoop.distro', 'HDP') }} + - hadoop.service_{{ salt['grains.get']('hadoop.distro', 'HDP') }} - gateway {% set certs = 'certs' %} {% if salt.file.file_exists('/srv/salt/platform-salt/pillar/'+certs+'.sls') %} diff --git a/salt/_modules/pnda.py b/salt/_modules/pnda.py index 465fe9136..115bbad4d 100644 --- a/salt/_modules/pnda.py +++ b/salt/_modules/pnda.py @@ -20,7 +20,8 @@ def get_name_service(): """ Returns name service for HA Cluster """ user_name = hadoop_manager_username() password = hadoop_manager_password() - request_url = 'http://%s:7180/api/v11/clusters/%s/services/%s/nameservices' % (hadoop_manager_ip(), cluster_name(), 'hdfs01') + service = __salt__['pillar.get']('hadoop_services:%s:service' % ("hdfs_namenode")) + request_url = 'http://%s:7180/api/v11/clusters/%s/services/%s/nameservices' % (hadoop_manager_ip(), cluster_name(), service) r = requests.get(request_url, 
auth=(user_name, password)) name_service = "" if r.status_code == 200: @@ -70,7 +71,7 @@ def hadoop_namenode(): if name_service: namenode_host = name_service else: - namenode_host = cloudera_get_hosts_by_hadoop_role('hdfs01', 'NAMENODE')[0] + namenode_host = get_hosts_by_hadoop_role("hdfs_namenode")[0] return 'hdfs://%s:8020' % namenode_host else: return get_namenode_from_ambari() @@ -190,20 +191,22 @@ def cloudera_get_hosts_by_hadoop_role(service, role_type): hosts_ids = [item['hostRef']['hostId'] for item in roles['items'] if item['type'] == role_type] # Get ip addresses - hosts_ips = [] + hosts_names = [] for host_id in hosts_ids: request_host_url = 'http://{}/api/v14/hosts/{}'.format(endpoint, host_id) r = requests.get(request_host_url, auth=(user, password)) r.raise_for_status() - ip_address = r.json()['ipAddress'] - hosts_ips.append(ip_address) + hostname = r.json()['hostname'] + hosts_names.append(hostname) - return hosts_ips + return hosts_names def ambari_get_hosts_by_hadoop_role(service, role_type): return [socket.getfqdn(host['HostRoles']['host_name']) for host in ambari_request('/clusters/%s/services/%s/components/%s' % (cluster_name(),service,role_type))['host_components']] -def get_hosts_by_hadoop_role(service, role_type): +def get_hosts_by_hadoop_role(hadoop_service_name): + service = __salt__['pillar.get']('hadoop_services:%s:service' % (hadoop_service_name)) + role_type = __salt__['pillar.get']('hadoop_services:%s:component' % (hadoop_service_name)) if hadoop_distro() == 'CDH': return cloudera_get_hosts_by_hadoop_role(service, role_type) else: diff --git a/salt/cdh/hue-login.sls b/salt/cdh/hue-login.sls index 72fafce22..fea6d1ae9 100644 --- a/salt/cdh/hue-login.sls +++ b/salt/cdh/hue-login.sls @@ -1,4 +1,4 @@ -{% set hue_server = salt['pnda.get_hosts_by_hadoop_role']('HUE', 'HUE_SERVER')[0] %} +{% set hue_server = salt['pnda.get_hosts_by_hadoop_role']('hue_server')[0] %} cdh-hue_script_copy: file.managed: diff --git 
a/salt/cdh/templates/cfg_bmstandard.py.tpl b/salt/cdh/templates/cfg_bmstandard.py.tpl index b197346da..d2fbf19dd 100644 --- a/salt/cdh/templates/cfg_bmstandard.py.tpl +++ b/salt/cdh/templates/cfg_bmstandard.py.tpl @@ -50,9 +50,9 @@ CMS_CFG = { } OOZIE_CFG = {"service": "OOZIE", - "name": "oozie01", - "config": {'mapreduce_yarn_service': 'yarn01', - 'zookeeper_service': 'zk01'}, + "name": '{{ pillar['hadoop_services']['oozie_server']['service'] }}', + "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'}, "roles": [{"name": "oozie-s", "type": "OOZIE_SERVER", "target": "MGR02"}], @@ -66,7 +66,7 @@ OOZIE_CFG = {"service": "OOZIE", 'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]} ZK_CFG = {"service": "ZOOKEEPER", - "name": "zk01", + "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', "config": {'zookeeper_datadir_autocreate': 'true'}, "roles": [{"name": "zk-s", "type": "SERVER", @@ -85,8 +85,8 @@ ZK_CFG = {"service": "ZOOKEEPER", MAPRED_CFG = { "service": "YARN", - "name": "yarn01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '265000'}, + "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '265000'}, "roles": [ { "name": "yarn-jh", @@ -167,7 +167,7 @@ S3_CONFIG = """\r\nfs.s3a.access.key{{ aws_key }}< HDFS_CFG = { "service": "HDFS", - "name": "hdfs01", + "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', "config": { 'dfs_replication': 2, @@ -258,8 +258,8 @@ HDFS_CFG = { HBASE_CFG = { "service": "HBASE", - "name": "hbase01", - "config": 
{'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'hbase_client_keyvalue_maxsize': '209715200'}, + "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'hbase_client_keyvalue_maxsize': '209715200'}, "roles": [ { @@ -324,7 +324,7 @@ HBASE_CFG = { HIVE_CFG = { "service": "HIVE", - "name": "hive01", + "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}', "config": { 'hive_metastore_database_type': 'mysql', @@ -373,7 +373,7 @@ HIVE_CFG = { IMPALA_CFG = { "service": "IMPALA", - "name": "impala01", + "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], 'hive_service': HIVE_CFG['name'], @@ -397,7 +397,7 @@ IMPALA_CFG = { HUE_CFG = { "service": "HUE", - "name": "hue01", + "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], @@ -434,7 +434,7 @@ HUE_CFG = { SPARK_CFG = { 'service': 'SPARK_ON_YARN', - 'name': 'spark_on_yarn', + 'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}', 'config': { 'yarn_service': MAPRED_CFG['name'], 'spark-conf/spark-env.sh_service_safety_valve': "SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\"" diff --git a/salt/cdh/templates/cfg_pico.py.tpl b/salt/cdh/templates/cfg_pico.py.tpl index a8f101df4..2557cca5b 100644 --- a/salt/cdh/templates/cfg_pico.py.tpl +++ b/salt/cdh/templates/cfg_pico.py.tpl @@ -81,9 +81,9 @@ CMS_CFG = { } OOZIE_CFG = {"service": "OOZIE", - "name": "oozie01", - "config": {'mapreduce_yarn_service': 'yarn01', - 'zookeeper_service': 'zk01'}, + "name": '{{ 
pillar['hadoop_services']['oozie_server']['service'] }}', + "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}' }, "roles": [{"name": "oozie-s", "type": "OOZIE_SERVER", "target": "MGR01"}], @@ -98,7 +98,7 @@ OOZIE_CFG = {"service": "OOZIE", 'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]} ZK_CFG = {"service": "ZOOKEEPER", - "name": "zk01", + "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', "config": {'zookeeper_datadir_autocreate': 'true'}, "roles": [{"name": "zk-s", "type": "SERVER", @@ -117,8 +117,8 @@ ZK_CFG = {"service": "ZOOKEEPER", MAPRED_CFG = { "service": "YARN", - "name": "yarn01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '86400', 'rm_dirty': 'true', 'yarn_log_aggregation_enable': 'false'}, + "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '86400', 'rm_dirty': 'true', 'yarn_log_aggregation_enable': 'false'}, "roles": [ { "name": "yarn-jh", @@ -225,7 +225,7 @@ S3_CONFIG = """\r\nfs.s3a.access.key{{ aws_key }}< HDFS_CFG = { "service": "HDFS", - "name": "hdfs01", + "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', "config": { 'dfs_replication': 1, @@ -317,8 +317,8 @@ HDFS_CFG = { HBASE_CFG = { "service": "HBASE", - "name": "hbase01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'rm_dirty': 'true'}, + "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ 
pillar['hadoop_services']['zookeeper_server']['service'] }}', 'rm_dirty': 'true'}, "roles": [ { @@ -392,7 +392,7 @@ HBASE_CFG = { HIVE_CFG = { "service": "HIVE", - "name": "hive01", + "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}', "config": { 'hive_metastore_database_type': 'mysql', @@ -457,7 +457,7 @@ HIVE_CFG = { IMPALA_CFG = { "service": "IMPALA", - "name": "impala01", + "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], 'hive_service': HIVE_CFG['name'], @@ -496,7 +496,7 @@ IMPALA_CFG = { HUE_CFG = { "service": "HUE", - "name": "hue01", + "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], @@ -534,7 +534,7 @@ HUE_CFG = { SPARK_CFG = { 'service': 'SPARK_ON_YARN', - 'name': 'spark_on_yarn', + 'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}', 'config': { 'yarn_service': MAPRED_CFG['name'], 'spark-conf/spark-env.sh_service_safety_valve': "SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\"" diff --git a/salt/cdh/templates/cfg_production.py.tpl b/salt/cdh/templates/cfg_production.py.tpl index d8994ba90..81a90aa21 100644 --- a/salt/cdh/templates/cfg_production.py.tpl +++ b/salt/cdh/templates/cfg_production.py.tpl @@ -51,9 +51,9 @@ CMS_CFG = { } OOZIE_CFG = {"service": "OOZIE", - "name": "oozie01", - "config": {'mapreduce_yarn_service': 'yarn01', - 'zookeeper_service': 'zk01'}, + "name": '{{ pillar['hadoop_services']['oozie_server']['service'] }}', + "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'}, "roles": [{"name": "oozie-s", "type": "OOZIE_SERVER", 
"target": "MGR04"}], @@ -68,7 +68,7 @@ OOZIE_CFG = {"service": "OOZIE", 'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]} ZK_CFG = {"service": "ZOOKEEPER", - "name": "zk01", + "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', "config": {'zookeeper_datadir_autocreate': 'true'}, "roles": [{"name": "zk-s", "type": "SERVER", @@ -92,8 +92,8 @@ ZK_CFG = {"service": "ZOOKEEPER", MAPRED_CFG = { "service": "YARN", - "name": "yarn01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '265000', 'yarn_log_aggregation_enable': 'false'}, + "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '265000', 'yarn_log_aggregation_enable': 'false'}, "roles": [ { "name": "yarn-jh", @@ -189,7 +189,7 @@ S3_CONFIG = """\r\nfs.s3a.access.key{{ aws_key }}< HDFS_CFG = { "service": "HDFS", - "name": "hdfs01", + "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', "config": { 'dfs_replication': 2, @@ -299,8 +299,8 @@ HDFS_CFG = { HBASE_CFG = { "service": "HBASE", - "name": "hbase01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01'}, + "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'}, "roles": [ { @@ -371,7 +371,7 @@ HBASE_CFG = { HIVE_CFG = { "service": "HIVE", - "name": "hive01", + "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}', "config": { 'hive_metastore_database_type': 'mysql', @@ -424,7 +424,7 @@ HIVE_CFG = { IMPALA_CFG = { "service": "IMPALA", - "name": "impala01", 
+ "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], 'hive_service': HIVE_CFG['name'], @@ -449,7 +449,7 @@ IMPALA_CFG = { HUE_CFG = { "service": "HUE", - "name": "hue01", + "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], @@ -486,7 +486,7 @@ HUE_CFG = { SPARK_CFG = { 'service': 'SPARK_ON_YARN', - 'name': 'spark_on_yarn', + 'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}', 'config': { 'yarn_service': MAPRED_CFG['name'], 'spark-conf/spark-env.sh_service_safety_valve':"SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\"" diff --git a/salt/cdh/templates/cfg_standard.py.tpl b/salt/cdh/templates/cfg_standard.py.tpl index 0a1d6b3d9..e3aa26972 100644 --- a/salt/cdh/templates/cfg_standard.py.tpl +++ b/salt/cdh/templates/cfg_standard.py.tpl @@ -50,9 +50,9 @@ CMS_CFG = { } OOZIE_CFG = {"service": "OOZIE", - "name": "oozie01", - "config": {'mapreduce_yarn_service': 'yarn01', - 'zookeeper_service': 'zk01'}, + "name": '{{ pillar['hadoop_services']['oozie_server']['service'] }}', + "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'}, "roles": [{"name": "oozie-s", "type": "OOZIE_SERVER", "target": "MGR04"}], @@ -66,7 +66,7 @@ OOZIE_CFG = {"service": "OOZIE", 'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]} ZK_CFG = {"service": "ZOOKEEPER", - "name": "zk01", + "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', "config": {'zookeeper_datadir_autocreate': 'true'}, "roles": [{"name": "zk-s", "type": "SERVER", @@ 
-85,8 +85,8 @@ ZK_CFG = {"service": "ZOOKEEPER", MAPRED_CFG = { "service": "YARN", - "name": "yarn01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '265000', 'yarn_log_aggregation_enable': 'false'}, + "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '265000', 'yarn_log_aggregation_enable': 'false'}, "roles": [ { "name": "yarn-jh", @@ -167,7 +167,7 @@ S3_CONFIG = """\r\nfs.s3a.access.key{{ aws_key }}< HDFS_CFG = { "service": "HDFS", - "name": "hdfs01", + "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', "config": { 'dfs_replication': 2, @@ -267,8 +267,8 @@ HDFS_CFG = { HBASE_CFG = { "service": "HBASE", - "name": "hbase01", - "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01'}, + "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}', + "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'}, "roles": [ { @@ -333,7 +333,7 @@ HBASE_CFG = { HIVE_CFG = { "service": "HIVE", - "name": "hive01", + "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}', "config": { 'hive_metastore_database_type': 'mysql', @@ -382,7 +382,7 @@ HIVE_CFG = { IMPALA_CFG = { "service": "IMPALA", - "name": "impala01", + "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], 'hive_service': HIVE_CFG['name'], @@ -406,7 +406,7 @@ IMPALA_CFG = { HUE_CFG = { "service": "HUE", - "name": "hue01", + "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}', "config": { 'hbase_service': HBASE_CFG['name'], @@ -443,7 +443,7 @@ HUE_CFG = { 
SPARK_CFG = { 'service': 'SPARK_ON_YARN', - 'name': 'spark_on_yarn', + 'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}', 'config': { 'yarn_service': MAPRED_CFG['name'], 'spark-conf/spark-env.sh_service_safety_valve':"SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\"" diff --git a/salt/hdp/oozie_libs.sls b/salt/hdp/oozie_libs.sls index 3ad0b664f..f853d545b 100644 --- a/salt/hdp/oozie_libs.sls +++ b/salt/hdp/oozie_libs.sls @@ -1,6 +1,6 @@ {% set scripts_location = '/tmp/pnda-install/' + sls %} -{% set httpfs_node = salt['pnda.get_hosts_by_hadoop_role']('HDFS', 'NAMENODE')[0] %} -{% set oozie_node = salt['pnda.get_hosts_by_hadoop_role']('OOZIE', 'OOZIE_SERVER')[0] %} +{% set httpfs_node = salt['pnda.get_hosts_by_hadoop_role']('hdfs_namenode')[0] %} +{% set oozie_node = salt['pnda.get_hosts_by_hadoop_role']('oozie_server')[0] %} {% set pip_index_url = pillar['pip']['index_url'] %} {% set oozie_spark_version = salt['pillar.get']('hdp:oozie_spark_version', '1') %} diff --git a/salt/knox/init.sls b/salt/knox/init.sls index 035edbbf0..db4810250 100644 --- a/salt/knox/init.sls +++ b/salt/knox/init.sls @@ -5,15 +5,17 @@ {% set pnda_mirror = pillar['pnda_mirror']['base_url'] %} {% set misc_packages_path = pillar['pnda_mirror']['misc_packages_path'] %} {% set mirror_location = pnda_mirror + misc_packages_path %} -{% set namenode_host = salt['pnda.get_hosts_by_hadoop_role']('HDFS', 'NAMENODE')[0] %} -{% set hive_host = salt['pnda.get_hosts_by_hadoop_role']('HIVE', 'HIVE_SERVER')[0] %} +{% set namenode_host = salt['pnda.get_hosts_by_hadoop_role']('hdfs_namenode')[0] %} +{% set hive_host = salt['pnda.get_hosts_by_hadoop_role']('hive_server')[0] %} +{% set hive_port = pillar['hadoop_services']['hive_server']['port'] %} {% set webhdfs_host = 
salt['pnda.get_hosts_by_hadoop_node']('MGR01')[0] %} {% set hbase_rest_host = salt['pnda.get_hosts_by_hadoop_node']('MGR01')[0] %} -{% set yarn_rm_hosts = salt['pnda.get_hosts_by_hadoop_role']('YARN', 'RESOURCEMANAGER') %} +{% set yarn_rm_hosts = salt['pnda.get_hosts_by_hadoop_role']('yarn_resource_manager') %} {% set yarn_ha_enabled = (yarn_rm_hosts is not none and yarn_rm_hosts|length>1) %} -{% set mr2_history_server_host = salt['pnda.get_hosts_by_hadoop_role']('MAPREDUCE2', 'HISTORYSERVER')[0] %} -{% set spark_history_server_host = salt['pnda.get_hosts_by_hadoop_role']('SPARK', 'SPARK_JOBHISTORYSERVER')[0] %} -{% set spark2_history_server_host = salt['pnda.get_hosts_by_hadoop_role']('SPARK2', 'SPARK2_JOBHISTORYSERVER')[0] %} +{% set mr2_history_server_host = salt['pnda.get_hosts_by_hadoop_role']('yarn_job_histroy_server')[0] %} +{% set spark_history_server_host = salt['pnda.get_hosts_by_hadoop_role']('spark_job_histroy_server')[0] %} +{% set spark_history_server_port = pillar['hadoop_services']['spark_job_histroy_server']['port'] %} +{% set spark2_history_server_host = salt['pnda.get_hosts_by_hadoop_role']('spark2_job_histroy_server')[0] %} {% set ambari_server_host = salt['pnda.get_hosts_for_role']('hadoop_manager')[0] %} {% set flink_history_server_host = salt['pnda.get_hosts_for_role']("FLINK")[0] %} {% set flink_history_server_port = pillar['flink']['historyserver_web_port'] %} @@ -30,6 +32,7 @@ {% set gateway = knox_home_directory + '/data/security/keystores/gateway.jks' %} {% set opentsdb_port = pillar['opentsdb']['bind_port'] %} {% set helper_directory = knox_home_directory + '/helper' %} +{% set hadoop_distro = salt['grains.get']('hadoop.distro', 'HDP') %} include: - java @@ -148,16 +151,19 @@ knox-create-pnda-topology: - source: salt://knox/templates/pnda.xml.tpl - template: jinja - context: + hadoop_distro: {{ hadoop_distro }} knox_authentication: {{ knox_authentication }} namenode_host: {{ namenode_host }} webhdfs_host: {{ webhdfs_host }} 
hbase_rest_host: {{ hbase_rest_host }} - yarn_rm_hosts: {{ yarn_rm_hosts }} + yarn_rm_hosts: ["{{yarn_rm_hosts|join('", "')|string()}}"] hive_host: {{ hive_host }} + hive_port: {{ hive_port }} pnda_domain: {{ pnda_domain }} opentsdb_port: {{ opentsdb_port }} ha_enabled: {{ yarn_ha_enabled }} spark_history_server_host: {{ spark_history_server_host }} + spark_history_server_port: {{ spark_history_server_port }} spark2_history_server_host: {{ spark2_history_server_host }} mr2_history_server_host: {{ mr2_history_server_host }} ambari_server_host: {{ ambari_server_host }} @@ -249,10 +255,13 @@ knox-import_CA: 'console': 'pnda-console/1.0.0', 'km': 'kafka-manager/1.3.3', 'kibana': 'kibana/6.2.1', - 'flinkhistoryui': 'flinkhistoryui/1.4.2', - 'spark2historyui': 'spark2historyui/2.2.0' + 'flinkhistoryui': 'flinkhistoryui/1.4.2' } %} +{% if hadoop_distro == 'HDP' %} + {% do knox_proxy_services.update({'spark2historyui': 'spark2historyui/2.2.0'}) %} +{% endif %} + {% for service_name in knox_proxy_services %} {% set knox_service_dir = knox_home_directory + '/data/services/' + knox_proxy_services[service_name] %} knox-service_dir_{{ service_name }}: diff --git a/salt/knox/templates/pnda.xml.tpl b/salt/knox/templates/pnda.xml.tpl index e4276e1f5..e42d661cd 100644 --- a/salt/knox/templates/pnda.xml.tpl +++ b/salt/knox/templates/pnda.xml.tpl @@ -64,7 +64,7 @@ HIVE - http://{{ hive_host }}:10001/cliservice + http://{{ hive_host }}:{{ hive_port }}/cliservice @@ -72,7 +72,6 @@ {% for item in yarn_rm_hosts %} http://{{ item }}:8088 {% endfor %} - @@ -89,12 +88,7 @@ SPARKHISTORYUI - http://{{ spark_history_server_host }}:18080 - - - - SPARK2HISTORYUI - http://{{ spark2_history_server_host }}:18081 + http://{{ spark_history_server_host }}:{{ spark_history_server_port }} @@ -117,6 +111,14 @@ http://console-internal.service.{{ pnda_domain }} + +{% if hadoop_distro == 'HDP' %} + + + SPARK2HISTORYUI + http://{{ spark2_history_server_host }}:18081 + + AMBARI http://{{ ambari_server_host 
}}:8080 @@ -127,6 +129,8 @@ http://{{ ambari_server_host }}:8080 +{% endif %} + FLINKHISTORYUI http://{{ flink_history_server_host }}:{{ flink_history_server_port }} diff --git a/salt/platform-testing/cdh.sls b/salt/platform-testing/cdh.sls index b2bc15c0f..1c4702742 100644 --- a/salt/platform-testing/cdh.sls +++ b/salt/platform-testing/cdh.sls @@ -17,11 +17,10 @@ {% set console_port = '3001' %} +{% set hive_node = salt['pnda.get_hosts_by_hadoop_role']('hive_server')[0] %} {%- if grains['hadoop.distro'] == 'CDH' -%} -{% set hive_node = salt['pnda.get_hosts_by_hadoop_role']('hive01', 'HIVESERVER2')[0] %} {% set hive_http_port = '10000' %} {%- else -%} -{% set hive_node = salt['pnda.get_hosts_by_hadoop_role']('HIVE', 'HIVE_SERVER')[0] %} {% set hive_http_port = '10001' %} {%- endif -%} diff --git a/salt/pnda_opentsdb/conf.sls b/salt/pnda_opentsdb/conf.sls index a76a643bc..a47db3484 100644 --- a/salt/pnda_opentsdb/conf.sls +++ b/salt/pnda_opentsdb/conf.sls @@ -1,14 +1,6 @@ -{% if grains['hadoop.distro'] == 'CDH' %} -{% set zk_service = 'zk01' %} -{% set zk_role = 'SERVER' %} -{% else %} -{% set zk_service = 'ZOOKEEPER' %} -{% set zk_role = 'ZOOKEEPER_SERVER' %} -{% endif %} - {% set hadoop_zk = [] %} -{% for ip in salt['pnda.get_hosts_by_hadoop_role'](zk_service, zk_role) %} +{% for ip in salt['pnda.get_hosts_by_hadoop_role']('zookeeper_server') %} {% do hadoop_zk.append(ip+':2181') %} {% endfor %}