diff --git a/contrib/hawq-docker/Makefile b/contrib/hawq-docker/Makefile index 20053524b2..6895658761 100644 --- a/contrib/hawq-docker/Makefile +++ b/contrib/hawq-docker/Makefile @@ -25,11 +25,14 @@ OS_VERSION := centos7 # Do not use underscore "_" in CLUSTER_ID CLUSTER_ID := $(OS_VERSION) # Monut this local directory to /data in data container and share with other containers -LOCAL := +LOCAL := # networks used in docker NETWORK := $(CLUSTER_ID)_hawq_network +HAWQ_HOME := "/data/hawq-devel" +PXF_CLASSPATH_TEMPLATE = "hdp" +JAVA_TOOL_OPTIONS := -Dfile.encoding=UTF8 -all: +all: @echo " Usage:" @echo " To setup a build and test environment: make run" @echo " To start all containers: make start" @@ -37,10 +40,21 @@ all: @echo " To remove hdfs containers: make clean" @echo " To remove all containers: make distclean" @echo "" - @echo " To build images locally: make build" + @echo " To build images locally: make build-image" @echo " To pull latest images: make pull" + @echo "" + @echo " To build Hawq runtime: make build-hawq" + @echo " To initialize Hawq on Namenode: make init-hawq" + @echo " To start Hawq on Namenode: make start-hawq" + @echo " To stop Hawq on Namenode: make stop-hawq" + @echo " To check Hawq status on Namenode: make status-hawq" + @echo " To build PXF runtime: make build-pxf" + @echo " To initialize PXF on Namenode/Datanodes: make init-pxf" + @echo " To start PXF on Namenode/Datanodes: make start-pxf" + @echo " To stop PXF on Namenode/Datanodes: make stop-pxf" + @echo " To check PXF status on Namenode/Datanodes: make status-pxf" -build: +build-image: @make -f $(THIS_MAKEFILE_PATH) build-hawq-dev-$(OS_VERSION) @make -f $(THIS_MAKEFILE_PATH) build-hawq-test-$(OS_VERSION) @echo "Build Images Done!" 
@@ -51,7 +65,10 @@ build-hawq-dev-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-dev/Dockerfil build-hawq-test-$(OS_VERSION): $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/Dockerfile @echo build hawq-test:$(OS_VERSION) image - docker build -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/ + docker build \ + --build-arg=PXF_CLASSPATH_TEMPLATE="`cat ../../pxf/pxf-service/src/configs/templates/pxf-private-${PXF_CLASSPATH_TEMPLATE}.classpath.template`" \ + --build-arg=PXF_LOG4J_PROPERTIES="`cat ../../pxf/pxf-service/src/main/resources/pxf-log4j.properties`" \ + -t hawq/hawq-test:$(OS_VERSION) $(TOP_DIR)/$(OS_VERSION)-docker/hawq-test/ create-data-container: @echo create ${CLUSTER_ID}-data container @@ -227,3 +244,218 @@ distclean: docker network rm $(NETWORK) 2>&1 >/dev/null || true; \ fi @echo "Distclean Done!" + +build-hawq: + @echo "Make sure you have executed make build-image" + @echo "Make sure you have executed make run" + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --build"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!"; \ + fi + +init-hawq: + @echo "Make sure you have executed make build-hawq" + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --init"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!"; \ + fi + +start-hawq: + @echo "Make sure you have executed make init-hawq" + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! 
-z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --start"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!"; \ + fi + +stop-hawq: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --stop"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!"; \ + fi + +status-hawq: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \ + -e "USER=gpadmin" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-hawq.sh --status"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!"; \ + fi + +build-pxf: + @echo "Make sure you have executed make build-image" + @echo "Make sure you have executed make run" + @make -f $(THIS_MAKEFILE_PATH) pxf-namenode + @i=1; \ + while [ $$i -le $(NDATANODES) ] ; do \ + make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i pxf-datanode; \ + i=$$((i+1)); \ + done + +pxf-namenode: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! 
-z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \ + -e "PXF_CLASSPATH_TEMPLATE=$(PXF_CLASSPATH_TEMPLATE)" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --build"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \ + fi + +pxf-datanode: + @echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "JAVA_TOOL_OPTIONS=$(JAVA_TOOL_OPTIONS)" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --build"; \ + else \ + echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \ + fi + +init-pxf: + @echo "Make sure you have executed make build-pxf" + @make -f $(THIS_MAKEFILE_PATH) init-pxf-namenode + @i=1; \ + while [ $$i -le $(NDATANODES) ] ; do \ + make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i init-pxf-datanode; \ + i=$$((i+1)); \ + done + +init-pxf-namenode: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --init"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \ + fi + +init-pxf-datanode: + @echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container" + @if [ ! 
-z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --init"; \ + else \ + echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \ + fi + +start-pxf: + @echo "Make sure you have executed make init-pxf" + @make -f $(THIS_MAKEFILE_PATH) start-pxf-namenode + @i=1; \ + while [ $$i -le $(NDATANODES) ] ; do \ + make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i start-pxf-datanode; \ + i=$$((i+1)); \ + done + +start-pxf-namenode: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --start"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \ + fi + +start-pxf-datanode: + @echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --start"; \ + else \ + echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \ + fi + +stop-pxf: + @make -f $(THIS_MAKEFILE_PATH) stop-pxf-namenode + @i=1; \ + while [ $$i -le $(NDATANODES) ] ; do \ + make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i stop-pxf-datanode; \ + i=$$((i+1)); \ + done + +stop-pxf-namenode: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! 
-z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --stop"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \ + fi + +stop-pxf-datanode: + @echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --stop"; \ + else \ + echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \ + fi + +status-pxf: + @make -f $(THIS_MAKEFILE_PATH) status-pxf-namenode + @i=1; \ + while [ $$i -le $(NDATANODES) ] ; do \ + make -f $(THIS_MAKEFILE_PATH) CUR_DATANODE=$$i status-pxf-datanode; \ + i=$$((i+1)); \ + done + +status-pxf-namenode: + @echo "Logging into ${CLUSTER_ID}-namenode container" + @if [ ! -z "`docker ps -a --filter="name=${CLUSTER_ID}-namenode" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-namenode /bin/bash -c "service-pxf.sh --status"; \ + else \ + echo "${CLUSTER_ID}-namenode container does not exist!" && exit 1; \ + fi + +status-pxf-datanode: + @echo "Logging into ${CLUSTER_ID}-datanode$(CUR_DATANODE) container" + @if [ ! 
-z "`docker ps -a --filter="name=${CLUSTER_ID}-datanode$(CUR_DATANODE)" | grep -v CONTAINER`" ]; then \ + docker exec \ + -e "HAWQ_HOME=$(HAWQ_HOME)" \ + -e "NAMENODE=${CLUSTER_ID}-namenode" \ + -u gpadmin --privileged -it ${CLUSTER_ID}-datanode$(CUR_DATANODE) /bin/bash -c "service-pxf.sh --status"; \ + else \ + echo "${CLUSTER_ID}-datanode$(CUR_DATANODE) container does not exist!" && exit 1; \ + fi \ No newline at end of file diff --git a/contrib/hawq-docker/README.md b/contrib/hawq-docker/README.md index 0e70de9991..cc7653c2e1 100644 --- a/contrib/hawq-docker/README.md +++ b/contrib/hawq-docker/README.md @@ -121,7 +121,16 @@ make pull To stop all containers: make stop To remove hdfs containers: make clean To remove all containers: make distclean - To build images locally: make build + To build images locally: make build-image To pull latest images: make pull + To build Hawq runtime: make build-hawq + To initialize Hawq on Namenode: make init-hawq + To start Hawq on Namenode: make start-hawq + To stop Hawq on Namenode: make stop-hawq + To check Hawq status on Namenode: make status-hawq + To build PXF runtime: make build-pxf + To initialize PXF on Namenode/Datanodes: make init-pxf + To start PXF on Namenode/Datanodes: make start-pxf + To stop PXF on Namenode/Datanodes: make stop-pxf + To check PXF status on Namenode/Datanodes: make status-pxf ``` - diff --git a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile index e1678c1276..46ff7432a2 100644 --- a/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile +++ b/contrib/hawq-docker/centos7-docker/hawq-dev/Dockerfile @@ -30,7 +30,7 @@ RUN yum install -y epel-release && \ openldap-devel protobuf-devel readline-devel net-snmp-devel apr-devel \ libesmtp-devel python-pip json-c-devel \ java-1.7.0-openjdk-devel lcov cmake \ - openssh-clients openssh-server perl-JSON && \ + openssh-clients openssh-server perl-JSON unzip && \ yum clean all RUN rpm -ivh 
--nodeps https://rpmfind.net/linux/centos/6.10/os/x86_64/Packages/bison-2.4.1-5.el6.x86_64.rpm diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile index ea5e22c599..497e6a6294 100644 --- a/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile +++ b/contrib/hawq-docker/centos7-docker/hawq-test/Dockerfile @@ -21,6 +21,9 @@ MAINTAINER Richard Guo USER root +ARG PXF_CLASSPATH_TEMPLATE +ARG PXF_LOG4J_PROPERTIES + ## install HDP 2.5.0 RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0.0/hdp.repo" -o /etc/yum.repos.d/hdp.repo && \ yum install -y hadoop hadoop-hdfs hadoop-libhdfs hadoop-yarn hadoop-mapreduce hadoop-client hdp-select && \ @@ -28,9 +31,16 @@ RUN curl -L "http://public-repo-1.hortonworks.com/HDP/centos7/2.x/updates/2.5.0. RUN ln -s /usr/hdp/current/hadoop-hdfs-namenode/../hadoop/sbin/hadoop-daemon.sh /usr/bin/hadoop-daemon.sh +RUN touch /tmp/pxf-private.classpath && \ + touch /tmp/pxf-log4j.properties && \ + echo "$PXF_CLASSPATH_TEMPLATE" > /tmp/pxf-private.classpath && \ + echo "$PXF_LOG4J_PROPERTIES" > /tmp/pxf-log4j.properties + COPY conf/* /etc/hadoop/conf/ COPY entrypoint.sh /usr/bin/entrypoint.sh +COPY service-hawq.sh /usr/bin/service-hawq.sh +COPY service-pxf.sh /usr/bin/service-pxf.sh COPY start-hdfs.sh /usr/bin/start-hdfs.sh USER gpadmin diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml index afc37fcfc7..69ce7c9131 100644 --- a/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml +++ b/contrib/hawq-docker/centos7-docker/hawq-test/conf/core-site.xml @@ -19,6 +19,6 @@ fs.defaultFS - hdfs://${hdfs.namenode}:8020 + hdfs://@hdfs.namenode@:8020 diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh index abdc508a37..308eb1e7bc 100755 --- 
a/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh +++ b/contrib/hawq-docker/centos7-docker/hawq-test/entrypoint.sh @@ -27,6 +27,9 @@ if [ ! -f /etc/profile.d/hadoop.sh ]; then sudo chmod a+x /etc/profile.d/hadoop.sh fi +sudo chmod 777 /etc/hadoop/conf/core-site.xml +sudo sed "s/@hdfs.namenode@/$NAMENODE/g" -i /etc/hadoop/conf/core-site.xml + sudo start-hdfs.sh sudo sysctl -p diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/service-hawq.sh b/contrib/hawq-docker/centos7-docker/hawq-test/service-hawq.sh new file mode 100755 index 0000000000..b6b298fa3e --- /dev/null +++ b/contrib/hawq-docker/centos7-docker/hawq-test/service-hawq.sh @@ -0,0 +1,105 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +build() { + git clone https://github.com/apache/hawq.git /data/hawq + + cd /data/hawq + ./configure --prefix=${HAWQ_HOME} + # make -j16 + make + make install + + source ${HAWQ_HOME}/greenplum_path.sh + + sudo sed 's|localhost|centos7-namenode|g' -i ${GPHOME}/etc/hawq-site.xml + sudo echo 'centos7-datanode1' > ${GPHOME}/etc/slaves + sudo echo 'centos7-datanode2' >> ${GPHOME}/etc/slaves + sudo echo 'centos7-datanode3' >> ${GPHOME}/etc/slaves + + sudo -u hdfs hdfs dfs -chown gpadmin / + + echo "Build HAWQ Done!" +} + +init() { + source ${HAWQ_HOME}/greenplum_path.sh + + export BASEDIR=/data + export HAWQSITE_CONF=${GPHOME}/etc/hawq-site.xml + export HOME=/home/gpadmin + export HOSTNAME=centos7-namenode + export JAVA_HOME=/etc/alternatives/java_sdk + export LD_LIBRARY_PATH=${GPHOME}/lib:/${GPHOME}/lib: + export LIBHDFS3_CONF=${GPHOME}/etc/hdfs-client.xml + export LIBYARN_CONF=${GPHOME}/etc/yarn-client.xml + export NAMENODE=${NAMENODE} + export OPENSSL_CONF=${GPHOME}/etc/openssl.cnf + export PATH=/${GPHOME}/bin:/${GPHOME}/bin:/usr/lib64/qt-3.3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + export PWD=/data + export PYTHONPATH=/${GPHOME}/lib/python:/${GPHOME}/lib/python: + export USER=gpadmin + + echo "Initializing HAWQ Cluster" + hawq init cluster -a + echo "Initializing HAWQ Cluster Done!" +} + +start() { + source ${HAWQ_HOME}/greenplum_path.sh + echo "Starting HAWQ Cluster" + hawq start cluster -a + echo "Starting HAWQ Cluster Done!" +} + +stop() { + source ${HAWQ_HOME}/greenplum_path.sh + echo "Stopping HAWQ Cluster" + hawq stop cluster -a + echo "Stopping HAWQ Cluster Done!" +} + +status() { + source ${HAWQ_HOME}/greenplum_path.sh + echo "Getting HAWQ Cluster status" + hawq state + echo "Getting HAWQ Cluster status Done!" 
+} + +case "$1" in + '--build') + build + ;; + '--init') + init + ;; + '--start') + start + ;; + '--stop') + stop + ;; + '--status') + status + ;; + *) + echo "Usage: $0 {--build|--init|--start|--stop|--status}" +esac + +exit 0 diff --git a/contrib/hawq-docker/centos7-docker/hawq-test/service-pxf.sh b/contrib/hawq-docker/centos7-docker/hawq-test/service-pxf.sh new file mode 100755 index 0000000000..d69a81429c --- /dev/null +++ b/contrib/hawq-docker/centos7-docker/hawq-test/service-pxf.sh @@ -0,0 +1,120 @@ +#!/bin/bash + +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +build() { + source ${HAWQ_HOME}/greenplum_path.sh + export PXF_HOME=${GPHOME}/pxf + + sudo chown -R gpadmin:gpadmin ${PXF_HOME}/conf/ + sudo echo "source ${GPHOME}/greenplum_path.sh" >> /home/gpadmin/.bashrc + + cd /data/hawq/pxf + # make -j16 + make + make install + + sudo sed 's|-pxf|-gpadmin|g' -i ${PXF_HOME}/conf/pxf-env.sh + + rm -rf ${PXF_HOME}/conf/pxf-private.classpath + rm -rf ${PXF_HOME}/conf/pxf-log4j.properties + + sudo cp /tmp/pxf-private.classpath ${PXF_HOME}/conf/pxf-private.classpath + sudo cp /tmp/pxf-log4j.properties ${PXF_HOME}/conf/pxf-log4j.properties + sudo sed 's|pxf\.log\.dir|PXF_HOME|g' -i ${PXF_HOME}/conf/pxf-log4j.properties + + echo "Make PXF Done!" +} + +init() { + source ${HAWQ_HOME}/greenplum_path.sh + export PXF_HOME=${GPHOME}/pxf + + export BASEDIR=/data + export HAWQSITE_CONF=${GPHOME}/etc/hawq-site.xml + export HOME=/home/gpadmin + export JAVA_HOME=/etc/alternatives/java_sdk + export LD_LIBRARY_PATH=${GPHOME}/lib:/${GPHOME}/lib: + export LIBHDFS3_CONF=${GPHOME}/etc/hdfs-client.xml + export LIBYARN_CONF=${GPHOME}/etc/yarn-client.xml + export NAMENODE=${NAMENODE} + export OPENSSL_CONF=${GPHOME}/etc/openssl.cnf + export PATH=/${GPHOME}/bin:/${GPHOME}/bin:/usr/lib64/qt-3.3/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin + export PWD=/data + export PYTHONPATH=/${GPHOME}/lib/python:/${GPHOME}/lib/python: + export USER=gpadmin + + sudo passwd -d gpadmin + sudo mkdir -p /var/run/pxf + sudo mkdir -p /var/log/pxf + sudo touch /var/log/pxf/catalina.out + sudo chmod -R 777 /var/run/pxf + sudo chmod -R 777 /var/log/pxf + + echo "Initializing PXF Service" + ${PXF_HOME}/bin/pxf init + echo "Initializing PXF Service Done!" +} + +start() { + source ${HAWQ_HOME}/greenplum_path.sh + export PXF_HOME=${GPHOME}/pxf + + export JAVA_HOME=/etc/alternatives/java_sdk + + echo "Starting PXF Service" + ${PXF_HOME}/bin/pxf start + echo "Starting PXF Service Done!" 
+} + +stop() { + source ${HAWQ_HOME}/greenplum_path.sh + export PXF_HOME=${GPHOME}/pxf + + export JAVA_HOME=/etc/alternatives/java_sdk + + echo "Stopping PXF Service" + ${PXF_HOME}/bin/pxf stop + echo "Stopping PXF Service Done!" +} + +status() { + curl "localhost:51200/pxf/ProtocolVersion"; echo +} + +case "$1" in + '--build') + build + ;; + '--init') + init + ;; + '--start') + start + ;; + '--stop') + stop + ;; + '--status') + status + ;; + *) + echo "Usage: $0 {--build|--init|--start|--stop|--status}" +esac + +exit 0