FAQ
The Standby JobTracker's IP address is used by mistake in the hue.ini which is
auto-generated by Cloudera Manager.


[lili@itri-bd-s29 ~]$ sudo cat /etc/hadoop/conf/mapred-site.xml
<?xml version="1.0" encoding="UTF-8"?>

<!--Autogenerated by Cloudera CM on 2014-03-17T08:28:32.831Z-->
<configuration>
   <property>
     <name>mapred.job.tracker</name>
     <value>logicaljt</value>
   </property>
   <property>
     <name>mapred.jobtrackers.logicaljt</name>
     <value>jobtracker71,jobtracker111</value>
   </property>
   <property>
     <name>mapred.client.failover.proxy.provider.logicaljt</name>
     <value>org.apache.hadoop.mapred.ConfiguredFailoverProxyProvider</value>
   </property>
   <property>
     <name>mapred.jobtracker.rpc-address.logicaljt.jobtracker71</name>
     <value>itri-bd-m00.hpcc.jp:8021</value>
   </property>
   <property>
     <name>mapred.job.tracker.http.address.logicaljt.jobtracker71</name>
     <value>0.0.0.0:50030</value>
   </property>
   <property>
     <name>mapred.ha.jobtracker.rpc-address.logicaljt.jobtracker71</name>
     <value>itri-bd-m00.hpcc.jp:8023</value>
   </property>
   <property>

<name>mapred.ha.jobtracker.http-redirect-address.logicaljt.jobtracker71</name>
     <value>itri-bd-m00.hpcc.jp:50030</value>
   </property>
   <property>
     <name>mapred.jobtracker.rpc-address.logicaljt.jobtracker111</name>
     <value>itri-bd-m01.hpcc.jp:8021</value>
   </property>
   <property>
     <name>mapred.job.tracker.http.address.logicaljt.jobtracker111</name>
     <value>0.0.0.0:50030</value>
   </property>
   <property>
     <name>mapred.ha.jobtracker.rpc-address.logicaljt.jobtracker111</name>
     <value>itri-bd-m01.hpcc.jp:8023</value>
   </property>
   <property>

<name>mapred.ha.jobtracker.http-redirect-address.logicaljt.jobtracker111</name>
     <value>itri-bd-m01.hpcc.jp:50030</value>
   </property>
   <property>
     <name>ha.zookeeper.quorum</name>
     <value>itri-bd-m01.hpcc.jp:2181,itri-bd-s00.hpcc.jp:2181,
itri-bd-m00.hpcc.jp:2181</value>
   </property>
   <property>
     <name>mapred.jobtracker.restart.recover</name>
     <value>true</value>
   </property>
   <property>
     <name>mapred.job.tracker.persist.jobstatus.active</name>
     <value>true</value>
   </property>
   <property>
     <name>mapred.ha.automatic-failover.enabled</name>
     <value>true</value>
   </property>
   <property>
     <name>mapred.ha.fencing.methods</name>
     <value>shell(/bin/true)</value>
   </property>
   <property>
     <name>mapred.ha.zkfc.port</name>
     <value>8018</value>
   </property>
   <property>
     <name>mapred.client.failover.max.attempts</name>
     <value>15</value>
   </property>
   <property>
     <name>mapred.client.failover.sleep.base.millis</name>
     <value>500</value>
   </property>
   <property>
     <name>mapred.client.failover.sleep.max.millis</name>
     <value>1500</value>
   </property>
   <property>
     <name>mapred.client.failover.connection.retries</name>
     <value>0</value>
   </property>
   <property>
     <name>mapred.client.failover.connection.retries.on.timeouts</name>
     <value>0</value>
   </property>
   <property>
     <name>mapreduce.job.counters.max</name>
     <value>120</value>
   </property>
   <property>
     <name>mapred.output.compress</name>
     <value>false</value>
   </property>
   <property>
     <name>mapred.output.compression.type</name>
     <value>BLOCK</value>
   </property>
   <property>
     <name>mapred.output.compression.codec</name>
     <value>org.apache.hadoop.io.compress.DefaultCodec</value>
   </property>
   <property>
     <name>mapred.map.output.compression.codec</name>
     <value>org.apache.hadoop.io.compress.SnappyCodec</value>
   </property>
   <property>
     <name>mapred.compress.map.output</name>
     <value>true</value>
   </property>
   <property>
     <name>zlib.compress.level</name>
     <value>DEFAULT_COMPRESSION</value>
   </property>
   <property>
     <name>io.sort.factor</name>
     <value>64</value>
   </property>
   <property>
     <name>io.sort.record.percent</name>
     <value>0.05</value>
   </property>
   <property>
     <name>io.sort.spill.percent</name>
     <value>0.8</value>
   </property>
   <property>
     <name>mapred.reduce.parallel.copies</name>
     <value>10</value>
   </property>
   <property>
     <name>mapred.submit.replication</name>
     <value>2</value>
   </property>
   <property>
     <name>mapred.reduce.tasks</name>
     <value>192</value>
   </property>
   <property>
     <name>mapred.userlog.retain.hours</name>
     <value>24</value>
   </property>
   <property>
     <name>io.sort.mb</name>
     <value>242</value>
   </property>
   <property>
     <name>mapred.child.java.opts</name>
     <value> -Xmx1019018840</value>
   </property>
   <property>
     <name>mapred.job.reuse.jvm.num.tasks</name>
     <value>1</value>
   </property>
   <property>
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
   </property>
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
   </property>
   <property>
     <name>mapred.reduce.slowstart.completed.maps</name>
     <value>0.8</value>
   </property>
   <property>
     <name>mapreduce.jobtracker.kerberos.principal</name>
     <value>mapred/_HOST@HPCC.JP</value>
   </property>
</configuration>


[lili@itri-bd-s29 ~]$ sudo cat
/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hue.ini
[desktop]
secret_key=n7JjKfVdWNO0dzRh4c4JvOLkMUbANG
http_host=itri-bd-s29.hpcc.jp
ssl_certificate=/opt/cert/server-bd-s29.crt
ssl_private_key=/opt/cert/server-bd-s29.key
http_port=8888
time_zone=Asia/Tokyo
django_debug_mode=0
http_500_debug_mode=0
cherrypy_server_threads=10
default_site_encoding=utf
collect_usage=true
[[auth]]
user_augmentor=desktop.auth.backend.DefaultUserAugmentor
backend=desktop.auth.backend.PamBackend
[[ldap]]
[[[users]]]
[[[groups]]]
[[database]]
engine=sqlite3
host=localhost
port=3306
user=hue
password=
name=/var/lib/hue/desktop.db
[[smtp]]
host=localhost
port=25
user=
password=
tls=no
[[kerberos]]
[hadoop]
[[hdfs_clusters]]
[[[default]]]
fs_defaultfs=hdfs://nameservice1
webhdfs_url=http://itri-bd-s29.hpcc.jp:14000/webhdfs/v1
hadoop_hdfs_home=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop-hdfs
hadoop_bin=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop/bin/hadoop
hadoop_conf_dir=/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf
security_enabled=true
temp_dir=/tmp
[[mapred_clusters]]
[[[default]]]
jobtracker_host=itri-bd-m01.hpcc.jp
thrift_port=9290
jobtracker_port=8021
submit_to=true
hadoop_mapred_home=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop-0.20-mapreduce
hadoop_bin=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop/bin/hadoop
hadoop_conf_dir=/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf
security_enabled=true
[beeswax]
beeswax_server_host=itri-bd-s29.hpcc.jp
beeswax_meta_server_port=8003
beeswax_server_port=8002
beeswax_server_conn_timeout=120
metastore_conn_timeout=10
beeswax_server_heapsize=256
[jobsub]
remote_data_dir=/user/hue/jobsub30
oozie_url=http://itri-bd-m00.hpcc.jp:11000/oozie
security_enabled=true
[useradmin]
[proxy]
whitelist=(localhost|127\.0\.0\.1):(50030|50070|50060|50075)
[shell]
[[ shelltypes ]]
[[[ pig ]]]
nice_name="Pig Shell (Grunt)"
command="/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/pig/../../bin/pig
-l /dev/null"
help="The command-line interpreter for Pig"
[[[[ environment ]]]]
[[[[[ JAVA_HOME ]]]]]
value="/usr/java/jdk1.7.0_45"
[[[[[ HADOOP_CONF_DIR ]]]]]
value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf"
[[[ hbase ]]]
nice_name="HBase Shell"
command="/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hbase/bin/hbase
shell"
help="The command-line HBase client interface."
[[[[ environment ]]]]
[[[[[ JAVA_HOME ]]]]]
value="/usr/java/jdk1.7.0_45"
[[[[[ HADOOP_CONF_DIR ]]]]]
value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf"
[[[[[ HBASE_CONF_DIR ]]]]]
value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hbase-conf"



2014-03-24 15:06 GMT+09:00 Romain Rigaux <romain@cloudera.com>:
Which Hue? Have you used the Job Tracker HA configuration?


http://gethue.tumblr.com/post/71637613809/jobtracker-high-availability-ha-in-mr1

Romain

On Sun, Mar 23, 2014 at 6:24 PM, 李黎 wrote:

Hi,

I have found a bug of HUE.

When the MapReduce service is running in HA mode, the HBase shell cannot be
used from the Hue web UI.
The Standby JobTracker's IP address is used by mistake now; the correct
behavior should be to use the Active JobTracker's IP address.

Thanks

To unsubscribe from this group and stop receiving emails from it, send
an email to scm-users+unsubscribe@cloudera.org.
To unsubscribe from this group and stop receiving emails from it, send an email to hue-user+unsubscribe@cloudera.org.

Search Discussions

  • 李黎 at Mar 24, 2014 at 8:36 am
    The error message is:
    Could not connect to itri-bd-m01.hpcc.jp:9290



    2014-03-24 17:19 GMT+09:00 李黎 <ri-ri@aist.go.jp>:
    The Standby JobTracker's IP address is used by mistake in the hue.ini
    which was auto-generated by Cloudera Manager.


    [lili@itri-bd-s29 ~]$ sudo cat /etc/hadoop/conf/mapred-site.xml
    <?xml version="1.0" encoding="UTF-8"?>

    <!--Autogenerated by Cloudera CM on 2014-03-17T08:28:32.831Z-->
    <configuration>
    <property>
    <name>mapred.job.tracker</name>
    <value>logicaljt</value>
    </property>
    <property>
    <name>mapred.jobtrackers.logicaljt</name>
    <value>jobtracker71,jobtracker111</value>
    </property>
    <property>
    <name>mapred.client.failover.proxy.provider.logicaljt</name>
    <value>org.apache.hadoop.mapred.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
    <name>mapred.jobtracker.rpc-address.logicaljt.jobtracker71</name>
    <value>itri-bd-m00.hpcc.jp:8021</value>
    </property>
    <property>
    <name>mapred.job.tracker.http.address.logicaljt.jobtracker71</name>
    <value>0.0.0.0:50030</value>
    </property>
    <property>
    <name>mapred.ha.jobtracker.rpc-address.logicaljt.jobtracker71</name>
    <value>itri-bd-m00.hpcc.jp:8023</value>
    </property>
    <property>

    <name>mapred.ha.jobtracker.http-redirect-address.logicaljt.jobtracker71</name>
    <value>itri-bd-m00.hpcc.jp:50030</value>
    </property>
    <property>
    <name>mapred.jobtracker.rpc-address.logicaljt.jobtracker111</name>
    <value>itri-bd-m01.hpcc.jp:8021</value>
    </property>
    <property>
    <name>mapred.job.tracker.http.address.logicaljt.jobtracker111</name>
    <value>0.0.0.0:50030</value>
    </property>
    <property>
    <name>mapred.ha.jobtracker.rpc-address.logicaljt.jobtracker111</name>
    <value>itri-bd-m01.hpcc.jp:8023</value>
    </property>
    <property>

    <name>mapred.ha.jobtracker.http-redirect-address.logicaljt.jobtracker111</name>
    <value>itri-bd-m01.hpcc.jp:50030</value>
    </property>
    <property>
    <name>ha.zookeeper.quorum</name>
    <value>itri-bd-m01.hpcc.jp:2181,itri-bd-s00.hpcc.jp:2181,
    itri-bd-m00.hpcc.jp:2181</value>
    </property>
    <property>
    <name>mapred.jobtracker.restart.recover</name>
    <value>true</value>
    </property>
    <property>
    <name>mapred.job.tracker.persist.jobstatus.active</name>
    <value>true</value>
    </property>
    <property>
    <name>mapred.ha.automatic-failover.enabled</name>
    <value>true</value>
    </property>
    <property>
    <name>mapred.ha.fencing.methods</name>
    <value>shell(/bin/true)</value>
    </property>
    <property>
    <name>mapred.ha.zkfc.port</name>
    <value>8018</value>
    </property>
    <property>
    <name>mapred.client.failover.max.attempts</name>
    <value>15</value>
    </property>
    <property>
    <name>mapred.client.failover.sleep.base.millis</name>
    <value>500</value>
    </property>
    <property>
    <name>mapred.client.failover.sleep.max.millis</name>
    <value>1500</value>
    </property>
    <property>
    <name>mapred.client.failover.connection.retries</name>
    <value>0</value>
    </property>
    <property>
    <name>mapred.client.failover.connection.retries.on.timeouts</name>
    <value>0</value>
    </property>
    <property>
    <name>mapreduce.job.counters.max</name>
    <value>120</value>
    </property>
    <property>
    <name>mapred.output.compress</name>
    <value>false</value>
    </property>
    <property>
    <name>mapred.output.compression.type</name>
    <value>BLOCK</value>
    </property>
    <property>
    <name>mapred.output.compression.codec</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec</value>
    </property>
    <property>
    <name>mapred.map.output.compression.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
    <property>
    <name>mapred.compress.map.output</name>
    <value>true</value>
    </property>
    <property>
    <name>zlib.compress.level</name>
    <value>DEFAULT_COMPRESSION</value>
    </property>
    <property>
    <name>io.sort.factor</name>
    <value>64</value>
    </property>
    <property>
    <name>io.sort.record.percent</name>
    <value>0.05</value>
    </property>
    <property>
    <name>io.sort.spill.percent</name>
    <value>0.8</value>
    </property>
    <property>
    <name>mapred.reduce.parallel.copies</name>
    <value>10</value>
    </property>
    <property>
    <name>mapred.submit.replication</name>
    <value>2</value>
    </property>
    <property>
    <name>mapred.reduce.tasks</name>
    <value>192</value>
    </property>
    <property>
    <name>mapred.userlog.retain.hours</name>
    <value>24</value>
    </property>
    <property>
    <name>io.sort.mb</name>
    <value>242</value>
    </property>
    <property>
    <name>mapred.child.java.opts</name>
    <value> -Xmx1019018840</value>
    </property>
    <property>
    <name>mapred.job.reuse.jvm.num.tasks</name>
    <value>1</value>
    </property>
    <property>
    <name>mapred.map.tasks.speculative.execution</name>
    <value>false</value>
    </property>
    <property>
    <name>mapred.reduce.tasks.speculative.execution</name>
    <value>false</value>
    </property>
    <property>
    <name>mapred.reduce.slowstart.completed.maps</name>
    <value>0.8</value>
    </property>
    <property>
    <name>mapreduce.jobtracker.kerberos.principal</name>
    <value>mapred/_HOST@HPCC.JP</value>
    </property>
    </configuration>


    [lili@itri-bd-s29 ~]$ sudo cat
    /var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hue.ini
    [desktop]
    secret_key=n7JjKfVdWNO0dzRh4c4JvOLkMUbANG
    http_host=itri-bd-s29.hpcc.jp
    ssl_certificate=/opt/cert/server-bd-s29.crt
    ssl_private_key=/opt/cert/server-bd-s29.key
    http_port=8888
    time_zone=Asia/Tokyo
    django_debug_mode=0
    http_500_debug_mode=0
    cherrypy_server_threads=10
    default_site_encoding=utf
    collect_usage=true
    [[auth]]
    user_augmentor=desktop.auth.backend.DefaultUserAugmentor
    backend=desktop.auth.backend.PamBackend
    [[ldap]]
    [[[users]]]
    [[[groups]]]
    [[database]]
    engine=sqlite3
    host=localhost
    port=3306
    user=hue
    password=
    name=/var/lib/hue/desktop.db
    [[smtp]]
    host=localhost
    port=25
    user=
    password=
    tls=no
    [[kerberos]]
    [hadoop]
    [[hdfs_clusters]]
    [[[default]]]
    fs_defaultfs=hdfs://nameservice1
    webhdfs_url=http://itri-bd-s29.hpcc.jp:14000/webhdfs/v1

    hadoop_hdfs_home=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop-hdfs

    hadoop_bin=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop/bin/hadoop

    hadoop_conf_dir=/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf
    security_enabled=true
    temp_dir=/tmp
    [[mapred_clusters]]
    [[[default]]]
    jobtracker_host=itri-bd-m01.hpcc.jp
    thrift_port=9290
    jobtracker_port=8021
    submit_to=true

    hadoop_mapred_home=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop-0.20-mapreduce

    hadoop_bin=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop/bin/hadoop

    hadoop_conf_dir=/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf
    security_enabled=true
    [beeswax]
    beeswax_server_host=itri-bd-s29.hpcc.jp
    beeswax_meta_server_port=8003
    beeswax_server_port=8002
    beeswax_server_conn_timeout=120
    metastore_conn_timeout=10
    beeswax_server_heapsize=256
    [jobsub]
    remote_data_dir=/user/hue/jobsub30
    oozie_url=http://itri-bd-m00.hpcc.jp:11000/oozie
    security_enabled=true
    [useradmin]
    [proxy]
    whitelist=(localhost|127\.0\.0\.1):(50030|50070|50060|50075)
    [shell]
    [[ shelltypes ]]
    [[[ pig ]]]
    nice_name="Pig Shell (Grunt)"
    command="/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/pig/../../bin/pig
    -l /dev/null"
    help="The command-line interpreter for Pig"
    [[[[ environment ]]]]
    [[[[[ JAVA_HOME ]]]]]
    value="/usr/java/jdk1.7.0_45"
    [[[[[ HADOOP_CONF_DIR ]]]]]
    value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf"
    [[[ hbase ]]]
    nice_name="HBase Shell"
    command="/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hbase/bin/hbase
    shell"
    help="The command-line HBase client interface."
    [[[[ environment ]]]]
    [[[[[ JAVA_HOME ]]]]]
    value="/usr/java/jdk1.7.0_45"
    [[[[[ HADOOP_CONF_DIR ]]]]]
    value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf"
    [[[[[ HBASE_CONF_DIR ]]]]]
    value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hbase-conf"



    2014-03-24 15:06 GMT+09:00 Romain Rigaux <romain@cloudera.com>:

    Which Hue? Have you used the Job Tracker HA configuration?

    http://gethue.tumblr.com/post/71637613809/jobtracker-high-availability-ha-in-mr1

    Romain

    On Sun, Mar 23, 2014 at 6:24 PM, 李黎 wrote:

    Hi,

    I have found a bug of HUE.

    When the MapReduce service is running in HA mode, the HBase shell cannot
    be used from the Hue web UI.
    The Standby JobTracker's IP address is used by mistake now; the correct
    behavior should be to use the Active JobTracker's IP address.

    Thanks

    To unsubscribe from this group and stop receiving emails from it, send
    an email to scm-users+unsubscribe@cloudera.org.
    To unsubscribe from this group and stop receiving emails from it, send an email to hue-user+unsubscribe@cloudera.org.
  • 李黎 at Mar 25, 2014 at 5:43 am
    I solved the problem after adding configuration of Thrift and Gateway
    instances of the HBase.

    Thanks


    2014-03-25 5:48 GMT+09:00 Romain Rigaux <romain@cloudera.com>:
    This means that your HBase Thrift Service version 1 is not up. Hue is using
    it to communicate with HBase:
    http://gethue.tumblr.com/post/55581863077/hue-2-5-and-its-hbase-app-is-out

    Romain

    On Mon, Mar 24, 2014 at 1:36 AM, 李黎 wrote:

    The error message is:
    Could not connect to itri-bd-m01.hpcc.jp:9290



    2014-03-24 17:19 GMT+09:00 李黎 <ri-ri@aist.go.jp>:

    The Standby JobTracker's IP address is used by mistake in the hue.ini
    which was auto-generated by Cloudera Manager.


    [lili@itri-bd-s29 ~]$ sudo cat /etc/hadoop/conf/mapred-site.xml
    <?xml version="1.0" encoding="UTF-8"?>

    <!--Autogenerated by Cloudera CM on 2014-03-17T08:28:32.831Z-->
    <configuration>
    <property>
    <name>mapred.job.tracker</name>
    <value>logicaljt</value>
    </property>
    <property>
    <name>mapred.jobtrackers.logicaljt</name>
    <value>jobtracker71,jobtracker111</value>
    </property>
    <property>
    <name>mapred.client.failover.proxy.provider.logicaljt</name>

    <value>org.apache.hadoop.mapred.ConfiguredFailoverProxyProvider</value>
    </property>
    <property>
    <name>mapred.jobtracker.rpc-address.logicaljt.jobtracker71</name>
    <value>itri-bd-m00.hpcc.jp:8021</value>
    </property>
    <property>
    <name>mapred.job.tracker.http.address.logicaljt.jobtracker71</name>
    <value>0.0.0.0:50030</value>
    </property>
    <property>
    <name>mapred.ha.jobtracker.rpc-address.logicaljt.jobtracker71</name>
    <value>itri-bd-m00.hpcc.jp:8023</value>
    </property>
    <property>

    <name>mapred.ha.jobtracker.http-redirect-address.logicaljt.jobtracker71</name>
    <value>itri-bd-m00.hpcc.jp:50030</value>
    </property>
    <property>
    <name>mapred.jobtracker.rpc-address.logicaljt.jobtracker111</name>
    <value>itri-bd-m01.hpcc.jp:8021</value>
    </property>
    <property>
    <name>mapred.job.tracker.http.address.logicaljt.jobtracker111</name>
    <value>0.0.0.0:50030</value>
    </property>
    <property>
    <name>mapred.ha.jobtracker.rpc-address.logicaljt.jobtracker111</name>
    <value>itri-bd-m01.hpcc.jp:8023</value>
    </property>
    <property>

    <name>mapred.ha.jobtracker.http-redirect-address.logicaljt.jobtracker111</name>
    <value>itri-bd-m01.hpcc.jp:50030</value>
    </property>
    <property>
    <name>ha.zookeeper.quorum</name>
    <value>itri-bd-m01.hpcc.jp:2181,itri-bd-s00.hpcc.jp:2181,
    itri-bd-m00.hpcc.jp:2181</value>
    </property>
    <property>
    <name>mapred.jobtracker.restart.recover</name>
    <value>true</value>
    </property>
    <property>
    <name>mapred.job.tracker.persist.jobstatus.active</name>
    <value>true</value>
    </property>
    <property>
    <name>mapred.ha.automatic-failover.enabled</name>
    <value>true</value>
    </property>
    <property>
    <name>mapred.ha.fencing.methods</name>
    <value>shell(/bin/true)</value>
    </property>
    <property>
    <name>mapred.ha.zkfc.port</name>
    <value>8018</value>
    </property>
    <property>
    <name>mapred.client.failover.max.attempts</name>
    <value>15</value>
    </property>
    <property>
    <name>mapred.client.failover.sleep.base.millis</name>
    <value>500</value>
    </property>
    <property>
    <name>mapred.client.failover.sleep.max.millis</name>
    <value>1500</value>
    </property>
    <property>
    <name>mapred.client.failover.connection.retries</name>
    <value>0</value>
    </property>
    <property>
    <name>mapred.client.failover.connection.retries.on.timeouts</name>
    <value>0</value>
    </property>
    <property>
    <name>mapreduce.job.counters.max</name>
    <value>120</value>
    </property>
    <property>
    <name>mapred.output.compress</name>
    <value>false</value>
    </property>
    <property>
    <name>mapred.output.compression.type</name>
    <value>BLOCK</value>
    </property>
    <property>
    <name>mapred.output.compression.codec</name>
    <value>org.apache.hadoop.io.compress.DefaultCodec</value>
    </property>
    <property>
    <name>mapred.map.output.compression.codec</name>
    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
    </property>
    <property>
    <name>mapred.compress.map.output</name>
    <value>true</value>
    </property>
    <property>
    <name>zlib.compress.level</name>
    <value>DEFAULT_COMPRESSION</value>
    </property>
    <property>
    <name>io.sort.factor</name>
    <value>64</value>
    </property>
    <property>
    <name>io.sort.record.percent</name>
    <value>0.05</value>
    </property>
    <property>
    <name>io.sort.spill.percent</name>
    <value>0.8</value>
    </property>
    <property>
    <name>mapred.reduce.parallel.copies</name>
    <value>10</value>
    </property>
    <property>
    <name>mapred.submit.replication</name>
    <value>2</value>
    </property>
    <property>
    <name>mapred.reduce.tasks</name>
    <value>192</value>
    </property>
    <property>
    <name>mapred.userlog.retain.hours</name>
    <value>24</value>
    </property>
    <property>
    <name>io.sort.mb</name>
    <value>242</value>
    </property>
    <property>
    <name>mapred.child.java.opts</name>
    <value> -Xmx1019018840</value>
    </property>
    <property>
    <name>mapred.job.reuse.jvm.num.tasks</name>
    <value>1</value>
    </property>
    <property>
    <name>mapred.map.tasks.speculative.execution</name>
    <value>false</value>
    </property>
    <property>
    <name>mapred.reduce.tasks.speculative.execution</name>
    <value>false</value>
    </property>
    <property>
    <name>mapred.reduce.slowstart.completed.maps</name>
    <value>0.8</value>
    </property>
    <property>
    <name>mapreduce.jobtracker.kerberos.principal</name>
    <value>mapred/_HOST@HPCC.JP</value>
    </property>
    </configuration>


    [lili@itri-bd-s29 ~]$ sudo cat
    /var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hue.ini
    [desktop]
    secret_key=n7JjKfVdWNO0dzRh4c4JvOLkMUbANG
    http_host=itri-bd-s29.hpcc.jp
    ssl_certificate=/opt/cert/server-bd-s29.crt
    ssl_private_key=/opt/cert/server-bd-s29.key
    http_port=8888
    time_zone=Asia/Tokyo
    django_debug_mode=0
    http_500_debug_mode=0
    cherrypy_server_threads=10
    default_site_encoding=utf
    collect_usage=true
    [[auth]]
    user_augmentor=desktop.auth.backend.DefaultUserAugmentor
    backend=desktop.auth.backend.PamBackend
    [[ldap]]
    [[[users]]]
    [[[groups]]]
    [[database]]
    engine=sqlite3
    host=localhost
    port=3306
    user=hue
    password=
    name=/var/lib/hue/desktop.db
    [[smtp]]
    host=localhost
    port=25
    user=
    password=
    tls=no
    [[kerberos]]
    [hadoop]
    [[hdfs_clusters]]
    [[[default]]]
    fs_defaultfs=hdfs://nameservice1
    webhdfs_url=http://itri-bd-s29.hpcc.jp:14000/webhdfs/v1

    hadoop_hdfs_home=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop-hdfs

    hadoop_bin=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop/bin/hadoop

    hadoop_conf_dir=/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf
    security_enabled=true
    temp_dir=/tmp
    [[mapred_clusters]]
    [[[default]]]
    jobtracker_host=itri-bd-m01.hpcc.jp
    thrift_port=9290
    jobtracker_port=8021
    submit_to=true

    hadoop_mapred_home=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop-0.20-mapreduce

    hadoop_bin=/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hadoop/bin/hadoop

    hadoop_conf_dir=/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf
    security_enabled=true
    [beeswax]
    beeswax_server_host=itri-bd-s29.hpcc.jp
    beeswax_meta_server_port=8003
    beeswax_server_port=8002
    beeswax_server_conn_timeout=120
    metastore_conn_timeout=10
    beeswax_server_heapsize=256
    [jobsub]
    remote_data_dir=/user/hue/jobsub30
    oozie_url=http://itri-bd-m00.hpcc.jp:11000/oozie
    security_enabled=true
    [useradmin]
    [proxy]
    whitelist=(localhost|127\.0\.0\.1):(50030|50070|50060|50075)
    [shell]
    [[ shelltypes ]]
    [[[ pig ]]]
    nice_name="Pig Shell (Grunt)"
    command="/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/pig/../../bin/pig
    -l /dev/null"
    help="The command-line interpreter for Pig"
    [[[[ environment ]]]]
    [[[[[ JAVA_HOME ]]]]]
    value="/usr/java/jdk1.7.0_45"
    [[[[[ HADOOP_CONF_DIR ]]]]]

    value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf"
    [[[ hbase ]]]
    nice_name="HBase Shell"
    command="/opt/cloudera/parcels/CDH-4.6.0-1.cdh4.6.0.p0.26/lib/hbase/bin/hbase
    shell"
    help="The command-line HBase client interface."
    [[[[ environment ]]]]
    [[[[[ JAVA_HOME ]]]]]
    value="/usr/java/jdk1.7.0_45"
    [[[[[ HADOOP_CONF_DIR ]]]]]

    value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hadoop-conf"
    [[[[[ HBASE_CONF_DIR ]]]]]

    value="/var/run/cloudera-scm-agent/process/6171-hue-HUE_SERVER/hbase-conf"



    2014-03-24 15:06 GMT+09:00 Romain Rigaux <romain@cloudera.com>:

    Which Hue? Have you used the Job Tracker HA configuration?

    http://gethue.tumblr.com/post/71637613809/jobtracker-high-availability-ha-in-mr1

    Romain

    On Sun, Mar 23, 2014 at 6:24 PM, 李黎 wrote:

    Hi,

    I have found a bug of HUE.

    When the MapReduce service is running in HA mode, the HBase shell cannot
    be used from the Hue web UI.
    The Standby JobTracker's IP address is used by mistake now; the
    correct behavior should be to use the Active JobTracker's IP address.

    Thanks

    To unsubscribe from this group and stop receiving emails from it,
    send an email to scm-users+unsubscribe@cloudera.org.
    To unsubscribe from this group and stop receiving emails from it, send an email to hue-user+unsubscribe@cloudera.org.

Related Discussions

Discussion Navigation
viewthread | post
Discussion Overview
grouphue-user @
categorieshadoop
postedMar 24, '14 at 8:19a
activeMar 25, '14 at 5:43a
posts3
users1
websitecloudera.com
irc#hadoop

1 user in discussion

李黎: 3 posts

People

Translate

site design / logo © 2022 Grokbase