PNDA-4780 CDH: orchestrate-pnda-knox fails on centos in openstack
janselva committed Aug 27, 2018
1 parent 38d4f02 commit 2dc4235
Showing 14 changed files with 166 additions and 90 deletions.
37 changes: 37 additions & 0 deletions pillar/hadoop/service_CDH.sls
@@ -0,0 +1,37 @@
+hadoop_services:
+  hbase_master:
+    service: hbase01
+    component: MASTER
+  hdfs_namenode:
+    service: hdfs01
+    component: NAMENODE
+  hive_server:
+    service: hive01
+    component: HIVESERVER2
+    port: 10000
+  hue_server:
+    service: hue01
+    component: HUE_SERVER
+  impala_catalog_server:
+    service: impala01
+    component: IMPALAD
+  oozie_server:
+    service: oozie01
+    component: OOZIE_SERVER
+  spark_job_histroy_server:
+    service: spark_on_yarn
+    component: SPARK_YARN_HISTORY_SERVER
+    port: 18088
+  spark2_job_histroy_server:
+    service: spark_on_yarn
+    component: SPARK_YARN_HISTORY_SERVER
+    port: 18088
+  yarn_resource_manager:
+    service: yarn01
+    component: RESOURCEMANAGER
+  yarn_job_histroy_server:
+    service: yarn01
+    component: JOBHISTORY
+  zookeeper_server:
+    service: zk01
+    component: SERVER
31 changes: 31 additions & 0 deletions pillar/hadoop/service_HDP.sls
@@ -0,0 +1,31 @@
+hadoop_services:
+  hbase_master:
+    service: HBASE
+    component: HBASE_MASTER
+  hdfs_namenode:
+    service: HDFS
+    component: NAMENODE
+  hive_server:
+    service: HIVE
+    component: HIVE_SERVER
+    port: 10001
+  oozie_server:
+    service: OOZIE
+    component: OOZIE_SERVER
+  spark_job_histroy_server:
+    service: SPARK
+    component: SPARK_JOBHISTORYSERVER
+    port: 18080
+  spark2_job_histroy_server:
+    service: SPARK2
+    component: SPARK2_JOBHISTORYSERVER
+    port: 18081
+  yarn_resource_manager:
+    service: YARN
+    component: RESOURCEMANAGER
+  yarn_job_histroy_server:
+    service: MAPREDUCE2
+    component: HISTORYSERVER
+  zookeeper_server:
+    service: ZOOKEEPER
+    component: ZOOKEEPER_SERVER
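
Both service maps publish the same logical role keys (hbase_master, hdfs_namenode, hive_server, ...), so states and modules can resolve distro-specific service and component names from a single pillar path. A minimal, runnable sketch of that lookup in plain Python — the PILLAR dict below is a hypothetical stand-in for data a real minion would fetch via __salt__['pillar.get']:

    # Stand-in for the CDH map rendered above; on a minion this comes from pillar.
    PILLAR = {
        'hadoop_services': {
            'hive_server': {'service': 'hive01', 'component': 'HIVESERVER2', 'port': 10000},
        },
    }

    def pillar_get(path, default=None):
        # Walk a colon-delimited path, the way Salt's pillar.get does.
        node = PILLAR
        for key in path.split(':'):
            if not isinstance(node, dict) or key not in node:
                return default
            node = node[key]
        return node

    def lookup_role(name):
        # Mirrors the lookups pnda.py performs for each logical role.
        return (pillar_get('hadoop_services:%s:service' % name),
                pillar_get('hadoop_services:%s:component' % name),
                pillar_get('hadoop_services:%s:port' % name))

    print(lookup_role('hive_server'))  # -> ('hive01', 'HIVESERVER2', 10000)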
1 change: 1 addition & 0 deletions pillar/top.sls
@@ -7,6 +7,7 @@
    - env_parameters
    - packages.{{ grains['os'] }}
    - hadoop.{{ salt['grains.get']('hadoop.distro', 'HDP') }}
+    - hadoop.service_{{ salt['grains.get']('hadoop.distro', 'HDP') }}
    - gateway
{% set certs = 'certs' %}
{% if salt.file.file_exists('/srv/salt/platform-salt/pillar/'+certs+'.sls') %}
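
With the hadoop.distro grain set to CDH (the lookup falls back to HDP when the grain is absent), the two hadoop includes render to:

    - hadoop.CDH
    - hadoop.service_CDH

so each minion pulls the service map matching its own distro, and nothing else in the pillar top file has to change. The hadoop.CDH include assumes a matching pillar/hadoop/CDH.sls, which this commit does not show.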
17 changes: 10 additions & 7 deletions salt/_modules/pnda.py
@@ -20,7 +20,8 @@ def get_name_service():
""" Returns name service for HA Cluster """
user_name = hadoop_manager_username()
password = hadoop_manager_password()
request_url = 'http://%s:7180/api/v11/clusters/%s/services/%s/nameservices' % (hadoop_manager_ip(), cluster_name(), 'hdfs01')
service = __salt__['pillar.get']('hadoop_services:%s:service' % ("hdfs_namenode"))
request_url = 'http://%s:7180/api/v11/clusters/%s/services/%s/nameservices' % (hadoop_manager_ip(), cluster_name(), service)
r = requests.get(request_url, auth=(user_name, password))
name_service = ""
if r.status_code == 200:
@@ -70,7 +71,7 @@ def hadoop_namenode():
        if name_service:
            namenode_host = name_service
        else:
-            namenode_host = cloudera_get_hosts_by_hadoop_role('hdfs01', 'NAMENODE')[0]
+            namenode_host = get_hosts_by_hadoop_role("hdfs_namenode")[0]
        return 'hdfs://%s:8020' % namenode_host
    else:
        return get_namenode_from_ambari()
@@ -190,20 +191,22 @@ def cloudera_get_hosts_by_hadoop_role(service, role_type):
    hosts_ids = [item['hostRef']['hostId'] for item in roles['items'] if item['type'] == role_type]

    # Get ip addresses
-    hosts_ips = []
+    hosts_names = []
    for host_id in hosts_ids:
        request_host_url = 'http://{}/api/v14/hosts/{}'.format(endpoint, host_id)
        r = requests.get(request_host_url, auth=(user, password))
        r.raise_for_status()
-        ip_address = r.json()['ipAddress']
-        hosts_ips.append(ip_address)
+        hostname = r.json()['hostname']
+        hosts_names.append(hostname)

-    return hosts_ips
+    return hosts_names

def ambari_get_hosts_by_hadoop_role(service, role_type):
    return [socket.getfqdn(host['HostRoles']['host_name']) for host in ambari_request('/clusters/%s/services/%s/components/%s' % (cluster_name(),service,role_type))['host_components']]

-def get_hosts_by_hadoop_role(service, role_type):
+def get_hosts_by_hadoop_role(hadoop_service_name):
+    service = __salt__['pillar.get']('hadoop_services:%s:service' % (hadoop_service_name))
+    role_type = __salt__['pillar.get']('hadoop_services:%s:component' % (hadoop_service_name))
    if hadoop_distro() == 'CDH':
        return cloudera_get_hosts_by_hadoop_role(service, role_type)
    else:
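
The refactored entry point hides the distro split behind one logical role name: callers no longer pass distro-specific (service, role_type) pairs. A hedged, runnable sketch of the new control flow, with the Salt and cluster-manager dependencies stubbed out (the hostnames below are invented for illustration):

    # Stub pillar data; a real minion resolves this via __salt__['pillar.get'].
    PILLAR = {'hdfs_namenode': {'service': 'hdfs01', 'component': 'NAMENODE'}}

    def hadoop_distro():
        return 'CDH'  # the real module reads the configured distro

    def cloudera_get_hosts_by_hadoop_role(service, role_type):
        return ['cdh-mgr-1.example.com']  # stub; real code walks the CM REST API as above

    def ambari_get_hosts_by_hadoop_role(service, role_type):
        return ['hdp-mgr-1.example.com']  # stub; real code queries Ambari

    def get_hosts_by_hadoop_role(hadoop_service_name):
        entry = PILLAR[hadoop_service_name]
        if hadoop_distro() == 'CDH':
            return cloudera_get_hosts_by_hadoop_role(entry['service'], entry['component'])
        return ambari_get_hosts_by_hadoop_role(entry['service'], entry['component'])

    print(get_hosts_by_hadoop_role('hdfs_namenode'))  # -> ['cdh-mgr-1.example.com']

Note the behavioural change on the CDH path as well: cloudera_get_hosts_by_hadoop_role now returns Cloudera Manager's hostname field rather than ipAddress.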
2 changes: 1 addition & 1 deletion salt/cdh/hue-login.sls
@@ -1,4 +1,4 @@
-{% set hue_server = salt['pnda.get_hosts_by_hadoop_role']('HUE', 'HUE_SERVER')[0] %}
+{% set hue_server = salt['pnda.get_hosts_by_hadoop_role']('hue_server')[0] %}

cdh-hue_script_copy:
  file.managed:
26 changes: 13 additions & 13 deletions salt/cdh/templates/cfg_bmstandard.py.tpl
@@ -50,9 +50,9 @@ CMS_CFG = {
}

OOZIE_CFG = {"service": "OOZIE",
-             "name": "oozie01",
-             "config": {'mapreduce_yarn_service': 'yarn01',
-                        'zookeeper_service': 'zk01'},
+             "name": '{{ pillar['hadoop_services']['oozie_server']['service'] }}',
+             "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}',
+                        'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'},
             "roles": [{"name": "oozie-s",
                        "type": "OOZIE_SERVER",
                        "target": "MGR02"}],
@@ -66,7 +66,7 @@ OOZIE_CFG = {"service": "OOZIE",
'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]}

ZK_CFG = {"service": "ZOOKEEPER",
-          "name": "zk01",
+          "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}',
          "config": {'zookeeper_datadir_autocreate': 'true'},
          "roles": [{"name": "zk-s",
                     "type": "SERVER",
@@ -85,8 +85,8 @@ ZK_CFG = {"service": "ZOOKEEPER",

MAPRED_CFG = {
    "service": "YARN",
-    "name": "yarn01",
-    "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '265000'},
+    "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}',
+    "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '265000'},
    "roles": [
        {
            "name": "yarn-jh",
@@ -167,7 +167,7 @@ S3_CONFIG = """\r\n<property><name>fs.s3a.access.key</name><value>{{ aws_key }}<

HDFS_CFG = {
    "service": "HDFS",
-    "name": "hdfs01",
+    "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}',
    "config":
        {
            'dfs_replication': 2,
@@ -258,8 +258,8 @@ HDFS_CFG = {

HBASE_CFG = {
    "service": "HBASE",
-    "name": "hbase01",
-    "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'hbase_client_keyvalue_maxsize': '209715200'},
+    "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}',
+    "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'hbase_client_keyvalue_maxsize': '209715200'},
    "roles":
        [
            {
@@ -324,7 +324,7 @@ HBASE_CFG = {

HIVE_CFG = {
    "service": "HIVE",
-    "name": "hive01",
+    "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}',
    "config":
        {
            'hive_metastore_database_type': 'mysql',
@@ -373,7 +373,7 @@ HIVE_CFG = {

IMPALA_CFG = {
    "service": "IMPALA",
-    "name": "impala01",
+    "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}',
    "config": {
        'hbase_service': HBASE_CFG['name'],
        'hive_service': HIVE_CFG['name'],
@@ -397,7 +397,7 @@ IMPALA_CFG = {

HUE_CFG = {
    "service": "HUE",
-    "name": "hue01",
+    "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}',
    "config":
        {
            'hbase_service': HBASE_CFG['name'],
@@ -434,7 +434,7 @@ HUE_CFG = {

SPARK_CFG = {
    'service': 'SPARK_ON_YARN',
-    'name': 'spark_on_yarn',
+    'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}',
    'config': {
        'yarn_service': MAPRED_CFG['name'],
        'spark-conf/spark-env.sh_service_safety_valve': "SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\""
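
Salt renders these .tpl files with pillar in scope, so the generated Python ends up with concrete literals again. With the CDH service map above, for example, the OOZIE block renders to:

    OOZIE_CFG = {"service": "OOZIE",
                 "name": 'oozie01',
                 "config": {'mapreduce_yarn_service': 'yarn01',
                            'zookeeper_service': 'zk01'},
                 ...

(rendered output assuming the default CDH service names; the "..." elides the unchanged role definitions).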
26 changes: 13 additions & 13 deletions salt/cdh/templates/cfg_pico.py.tpl
@@ -81,9 +81,9 @@ CMS_CFG = {
}

OOZIE_CFG = {"service": "OOZIE",
-             "name": "oozie01",
-             "config": {'mapreduce_yarn_service': 'yarn01',
-                        'zookeeper_service': 'zk01'},
+             "name": '{{ pillar['hadoop_services']['oozie_server']['service'] }}',
+             "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}',
+                        'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'},
             "roles": [{"name": "oozie-s",
                        "type": "OOZIE_SERVER",
                        "target": "MGR01"}],
@@ -98,7 +98,7 @@ OOZIE_CFG = {"service": "OOZIE",
'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]}

ZK_CFG = {"service": "ZOOKEEPER",
-          "name": "zk01",
+          "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}',
          "config": {'zookeeper_datadir_autocreate': 'true'},
          "roles": [{"name": "zk-s",
                     "type": "SERVER",
@@ -117,8 +117,8 @@ ZK_CFG = {"service": "ZOOKEEPER",

MAPRED_CFG = {
    "service": "YARN",
-    "name": "yarn01",
-    "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '86400', 'rm_dirty': 'true', 'yarn_log_aggregation_enable': 'false'},
+    "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}',
+    "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '86400', 'rm_dirty': 'true', 'yarn_log_aggregation_enable': 'false'},
    "roles": [
        {
            "name": "yarn-jh",
@@ -225,7 +225,7 @@ S3_CONFIG = """\r\n<property><name>fs.s3a.access.key</name><value>{{ aws_key }}<

HDFS_CFG = {
    "service": "HDFS",
-    "name": "hdfs01",
+    "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}',
    "config":
        {
            'dfs_replication': 1,
@@ -317,8 +317,8 @@ HDFS_CFG = {

HBASE_CFG = {
    "service": "HBASE",
-    "name": "hbase01",
-    "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'rm_dirty': 'true'},
+    "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}',
+    "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'rm_dirty': 'true'},
    "roles":
        [
            {
@@ -392,7 +392,7 @@ HBASE_CFG = {

HIVE_CFG = {
    "service": "HIVE",
-    "name": "hive01",
+    "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}',
    "config":
        {
            'hive_metastore_database_type': 'mysql',
@@ -457,7 +457,7 @@ HIVE_CFG = {

IMPALA_CFG = {
    "service": "IMPALA",
-    "name": "impala01",
+    "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}',
    "config": {
        'hbase_service': HBASE_CFG['name'],
        'hive_service': HIVE_CFG['name'],
@@ -496,7 +496,7 @@ IMPALA_CFG = {

HUE_CFG = {
    "service": "HUE",
-    "name": "hue01",
+    "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}',
    "config":
        {
            'hbase_service': HBASE_CFG['name'],
@@ -534,7 +534,7 @@ HUE_CFG = {

SPARK_CFG = {
    'service': 'SPARK_ON_YARN',
-    'name': 'spark_on_yarn',
+    'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}',
    'config': {
        'yarn_service': MAPRED_CFG['name'],
        'spark-conf/spark-env.sh_service_safety_valve': "SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\""
26 changes: 13 additions & 13 deletions salt/cdh/templates/cfg_production.py.tpl
@@ -51,9 +51,9 @@ CMS_CFG = {
}

OOZIE_CFG = {"service": "OOZIE",
-             "name": "oozie01",
-             "config": {'mapreduce_yarn_service': 'yarn01',
-                        'zookeeper_service': 'zk01'},
+             "name": '{{ pillar['hadoop_services']['oozie_server']['service'] }}',
+             "config": {'mapreduce_yarn_service': '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}',
+                        'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'},
             "roles": [{"name": "oozie-s",
                        "type": "OOZIE_SERVER",
                        "target": "MGR04"}],
@@ -68,7 +68,7 @@ OOZIE_CFG = {"service": "OOZIE",
'log_directory_free_space_absolute_thresholds': '{"warning": "1050000000","critical": "900000000"}'}}]}

ZK_CFG = {"service": "ZOOKEEPER",
-          "name": "zk01",
+          "name": '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}',
          "config": {'zookeeper_datadir_autocreate': 'true'},
          "roles": [{"name": "zk-s",
                     "type": "SERVER",
@@ -92,8 +92,8 @@ ZK_CFG = {"service": "ZOOKEEPER",

MAPRED_CFG = {
    "service": "YARN",
-    "name": "yarn01",
-    "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01', 'yarn_log_aggregation_retain_seconds': '265000', 'yarn_log_aggregation_enable': 'false'},
+    "name": '{{ pillar['hadoop_services']['yarn_resource_manager']['service'] }}',
+    "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}', 'yarn_log_aggregation_retain_seconds': '265000', 'yarn_log_aggregation_enable': 'false'},
    "roles": [
        {
            "name": "yarn-jh",
@@ -189,7 +189,7 @@ S3_CONFIG = """\r\n<property><name>fs.s3a.access.key</name><value>{{ aws_key }}<

HDFS_CFG = {
    "service": "HDFS",
-    "name": "hdfs01",
+    "name": '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}',
    "config":
        {
            'dfs_replication': 2,
@@ -299,8 +299,8 @@ HDFS_CFG = {

HBASE_CFG = {
    "service": "HBASE",
-    "name": "hbase01",
-    "config": {'hdfs_service': 'hdfs01', 'zookeeper_service': 'zk01'},
+    "name": '{{ pillar['hadoop_services']['hbase_master']['service'] }}',
+    "config": {'hdfs_service': '{{ pillar['hadoop_services']['hdfs_namenode']['service'] }}', 'zookeeper_service': '{{ pillar['hadoop_services']['zookeeper_server']['service'] }}'},
    "roles":
        [
            {
@@ -371,7 +371,7 @@ HBASE_CFG = {

HIVE_CFG = {
    "service": "HIVE",
-    "name": "hive01",
+    "name": '{{ pillar['hadoop_services']['hive_server']['service'] }}',
    "config":
        {
            'hive_metastore_database_type': 'mysql',
@@ -424,7 +424,7 @@ HIVE_CFG = {

IMPALA_CFG = {
    "service": "IMPALA",
-    "name": "impala01",
+    "name": '{{ pillar['hadoop_services']['impala_catalog_server']['service'] }}',
    "config": {
        'hbase_service': HBASE_CFG['name'],
        'hive_service': HIVE_CFG['name'],
@@ -449,7 +449,7 @@ IMPALA_CFG = {

HUE_CFG = {
    "service": "HUE",
-    "name": "hue01",
+    "name": '{{ pillar['hadoop_services']['hue_server']['service'] }}',
    "config":
        {
            'hbase_service': HBASE_CFG['name'],
@@ -486,7 +486,7 @@ HUE_CFG = {

SPARK_CFG = {
    'service': 'SPARK_ON_YARN',
-    'name': 'spark_on_yarn',
+    'name': '{{ pillar['hadoop_services']['spark_job_histroy_server']['service'] }}',
    'config': {
        'yarn_service': MAPRED_CFG['name'],
        'spark-conf/spark-env.sh_service_safety_valve': "SPARK_PYTHON_PATH={{ app_packages_dir }}/lib/python2.7/site-packages\nexport PYSPARK_DRIVER_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYSPARK_PYTHON=/opt/pnda/anaconda/bin/python\nexport PYTHONPATH=\"$PYTHONPATH:$SPARK_PYTHON_PATH\""
