Author: brock
Date: Tue Feb 18 02:18:36 2014
New Revision: 1569164

URL: http://svn.apache.org/r1569164
Log:
HIVE-6037 - Synchronize HiveConf with hive-default.xml.template and support show conf (Navis via Brock)
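
For reference, the statement added by this patch takes a quoted configuration name
and returns its default value, type, and description (the three columns declared in
ShowConfDesc.getSchema()); from the show_conf.q test added below:

    show conf "hive.stats.retries.wait";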

Added:
     hive/trunk/common/src/java/org/apache/hadoop/hive/ant/
     hive/trunk/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java
     hive/trunk/common/src/java/org/apache/hadoop/hive/conf/Validator.java
     hive/trunk/common/src/java/org/apache/hive/common/util/SystemVariables.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java
     hive/trunk/ql/src/test/queries/clientpositive/show_conf.q
     hive/trunk/ql/src/test/results/clientpositive/show_conf.q.out
Modified:
     hive/trunk/common/pom.xml
     hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
     hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
     hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java
     hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
     hive/trunk/conf/hive-default.xml.template
     hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java
     hive/trunk/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/VariableSubstitution.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
     hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java

Modified: hive/trunk/common/pom.xml
URL: http://svn.apache.org/viewvc/hive/trunk/common/pom.xml?rev=1569164&r1=1569163&r2=1569164&view=diff
==============================================================================
--- hive/trunk/common/pom.xml (original)
+++ hive/trunk/common/pom.xml Tue Feb 18 02:18:36 2014
@@ -65,6 +65,11 @@
        <artifactId>commons-compress</artifactId>
        <version>${commons-compress.version}</version>
      </dependency>
+    <dependency>
+      <groupId>org.apache.ant</groupId>
+      <artifactId>ant</artifactId>
+      <version>${ant.version}</version>
+    </dependency>
      <!-- test inter-project -->
      <dependency>
        <groupId>junit</groupId>
@@ -106,6 +111,12 @@
    </profiles>

    <build>
+    <resources>
+      <resource>
+        <directory>../conf/</directory>
+        <includes><include>hive-default.xml.template</include></includes>
+      </resource>
+    </resources>
      <sourceDirectory>${basedir}/src/java</sourceDirectory>
      <testSourceDirectory>${basedir}/src/test</testSourceDirectory>
      <scriptSourceDirectory>${basedir}/src/scripts</scriptSourceDirectory>
@@ -136,6 +147,21 @@
                <goal>run</goal>
              </goals>
            </execution>
+          <execution>
+            <id>generate-template</id>
+            <phase>package</phase>
+            <configuration>
+              <target>
+                <property name="compile.classpath" refid="maven.runtime.classpath"/>
+                <taskdef name="templategen" classname="org.apache.hadoop.hive.ant.GenHiveTemplate"
+                         classpath="${compile.classpath}"/>
+                <templategen templateFile="${basedir}/../conf/hive-default.xml.template"/>
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
          </executions>
        </plugin>
        <plugin>

Added: hive/trunk/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java
URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java?rev=1569164&view=auto
==============================================================================
--- hive/trunk/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java (added)
+++ hive/trunk/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java Tue Feb 18 02:18:36 2014
@@ -0,0 +1,164 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ant;
+
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.tools.ant.BuildException;
+import org.apache.tools.ant.Task;
+import org.w3c.dom.Document;
+import org.w3c.dom.Element;
+import org.w3c.dom.Text;
+
+import javax.xml.parsers.DocumentBuilder;
+import javax.xml.parsers.DocumentBuilderFactory;
+import javax.xml.transform.OutputKeys;
+import javax.xml.transform.Transformer;
+import javax.xml.transform.TransformerFactory;
+import javax.xml.transform.dom.DOMSource;
+import javax.xml.transform.stream.StreamResult;
+import java.io.File;
+import java.net.URL;
+
+/**
+ * Generates hive-default.xml.template from HiveConf.ConfVars
+ */
+public class GenHiveTemplate extends Task {
+
+  private String templateFile;
+
+  public String getTemplateFile() {
+    return templateFile;
+  }
+
+  public void setTemplateFile(String templateFile) {
+    this.templateFile = templateFile;
+  }
+
+  private void generate() throws Exception {
+    File current = new File(templateFile);
+    if (current.exists()) {
+      ClassLoader loader = GenHiveTemplate.class.getClassLoader();
+      URL url = loader.getResource("org/apache/hadoop/hive/conf/HiveConf.class");
+      if (url != null) {
+        File file = new File(url.getFile());
+        if (file.exists() && file.lastModified() < current.lastModified()) {
+          return;
+        }
+      }
+    }
+    writeToFile(current, generateTemplate());
+  }
+
+  private Document generateTemplate() throws Exception {
+    DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
+    DocumentBuilder docBuilder = dbf.newDocumentBuilder();
+    Document doc = docBuilder.newDocument();
+    doc.appendChild(doc.createProcessingInstruction(
+        "xml-stylesheet", "type=\"text/xsl\" href=\"configuration.xsl\""));
+
+    doc.appendChild(doc.createComment("\n" +
+        " Licensed to the Apache Software Foundation (ASF) under one or more\n" +
+        " contributor license agreements. See the NOTICE file distributed with\n" +
+        " this work for additional information regarding copyright ownership.\n" +
+        " The ASF licenses this file to You under the Apache License, Version 2.0\n" +
+        " (the \"License\"); you may not use this file except in compliance with\n" +
+        " the License. You may obtain a copy of the License at\n" +
+        "\n" +
+        " http://www.apache.org/licenses/LICENSE-2.0\n" +
+        "\n" +
+        " Unless required by applicable law or agreed to in writing, software\n" +
+        " distributed under the License is distributed on an \"AS IS\" BASIS,\n" +
+        " WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" +
+        " See the License for the specific language governing permissions and\n" +
+        " limitations under the License.\n"));
+
+    Element root = doc.createElement("configuration");
+    doc.appendChild(root);
+
+    root.appendChild(doc.createComment(
+        " WARNING!!! This file is auto generated for documentation purposes ONLY! "));
+    root.appendChild(doc.createComment(
+        " WARNING!!! Any changes you make to this file will be ignored by Hive. "));
+    root.appendChild(doc.createComment(
+        " WARNING!!! You must make your changes in hive-site.xml instead. "));
+
+    root.appendChild(doc.createComment(" Hive Execution Parameters "));
+
+    for (HiveConf.ConfVars confVars : HiveConf.ConfVars.values()) {
+      if (confVars.isExcluded()) {
+        // thought of creating a template for each shim, but couldn't generate a proper mvn script
+        continue;
+      }
+      Element property = appendElement(root, "property", null);
+      appendElement(property, "key", confVars.varname);
+      appendElement(property, "value", confVars.getDefaultValue());
+      appendElement(property, "description", normalize(confVars.getDescription()));
+      // would be nice to add a newline here
+    }
+    return doc;
+  }
+
+  private String normalize(String description) {
+    int index = description.indexOf('\n');
+    if (index < 0) {
+      return description;
+    }
+    int prev = 0;
+    StringBuilder builder = new StringBuilder(description.length() << 1);
+    for (; index > 0; index = description.indexOf('\n', prev = index + 1)) {
+      builder.append("\n ").append(description.substring(prev, index));
+    }
+    builder.append("\n ");
+    return builder.toString();
+  }
+
+  private void writeToFile(File template, Document document) throws Exception {
+    Transformer transformer = TransformerFactory.newInstance().newTransformer();
+    transformer.setOutputProperty(OutputKeys.INDENT, "yes");
+    transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "2");
+    DOMSource source = new DOMSource(document);
+    StreamResult result = new StreamResult(template);
+    transformer.transform(source, result);
+  }
+
+  private Element appendElement(Element parent, String name, String text) {
+    Document document = parent.getOwnerDocument();
+    Element child = document.createElement(name);
+    parent.appendChild(child);
+    if (text != null) {
+      Text textNode = document.createTextNode(text);
+      child.appendChild(textNode);
+    }
+    return child;
+  }
+
+  @Override
+  public void execute() throws BuildException {
+    try {
+      generate();
+    } catch (Exception e) {
+      throw new BuildException(e);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    GenHiveTemplate gen = new GenHiveTemplate();
+    gen.generate();
+  }
+}
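
A note on usage: the pom.xml change above runs this task during the package phase
via an ant taskdef. It can also be driven directly; a minimal sketch, assuming
hive-common and Ant are on the classpath (main() above never sets templateFile,
so a direct caller must):

    import org.apache.hadoop.hive.ant.GenHiveTemplate;

    // Hypothetical standalone driver for the template generator.
    public class RegenTemplate {
      public static void main(String[] args) {
        GenHiveTemplate gen = new GenHiveTemplate();
        gen.setTemplateFile("conf/hive-default.xml.template"); // assumed checkout-relative path
        gen.execute(); // runs generate(), rethrowing any failure as BuildException
      }
    }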


  • Brock at Feb 18, 2014 at 2:19 am
    Added: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/Validator.java
    URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/Validator.java?rev=1569164&view=auto
    ==============================================================================
    --- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/Validator.java (added)
    +++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/Validator.java Tue Feb 18 02:18:36 2014
    @@ -0,0 +1,159 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hadoop.hive.conf;
    +
    +import java.util.ArrayList;
    +import java.util.LinkedHashSet;
    +import java.util.List;
    +import java.util.Set;
    +import java.util.regex.Pattern;
    +
    +/**
    + * validate value for a ConfVar, return non-null string for fail message
    + */
    +public interface Validator {
    +
    +  String validate(String value);
    +
    +  static class StringSet implements Validator {
    +
    +    private final Set<String> expected = new LinkedHashSet<String>();
    +
    +    public StringSet(String... values) {
    +      for (String value : values) {
    +        expected.add(value.toLowerCase());
    +      }
    +    }
    +
    +    @Override
    +    public String validate(String value) {
    +      if (value == null || !expected.contains(value.toLowerCase())) {
    +        return "Invalid value.. expects one of " + expected;
    +      }
    +      return null;
    +    }
    +  }
    +
    +  static enum RANGE_TYPE {
    +    INT {
    +      @Override
    +      protected boolean inRange(String value, Object lower, Object upper) {
    +        int ivalue = Integer.parseInt(value);
    +        return (Integer)lower <= ivalue && ivalue <= (Integer)upper;
    +      }
    +    },
    +    LONG {
    +      @Override
    +      protected boolean inRange(String value, Object lower, Object upper) {
    +        long lvalue = Long.parseLong(value);
    +        return (Long)lower <= lvalue && lvalue <= (Long)upper;
    +      }
    +    },
    +    FLOAT {
    +      @Override
    +      protected boolean inRange(String value, Object lower, Object upper) {
    +        float fvalue = Float.parseFloat(value);
    +        return (Float)lower <= fvalue && fvalue <= (Float)upper;
    +      }
    +    };
    +
    +    public static RANGE_TYPE valueOf(Object lower, Object upper) {
    +      if (lower instanceof Integer && upper instanceof Integer) {
    +        assert (Integer)lower < (Integer)upper;
    +        return INT;
    +      } else if (lower instanceof Long && upper instanceof Long) {
    +        assert (Long)lower < (Long)upper;
    +        return LONG;
    +      } else if (lower instanceof Float && upper instanceof Float) {
    +        assert (Float)lower < (Float)upper;
    +        return FLOAT;
    +      }
    +      throw new IllegalArgumentException("invalid range from " + lower + " to " + upper);
    +    }
    +
    +    protected abstract boolean inRange(String value, Object lower, Object upper);
    +  }
    +
    +  static class RangeValidator implements Validator {
    +
    +    private final RANGE_TYPE type;
    +    private final Object lower, upper;
    +
    +    public RangeValidator(Object lower, Object upper) {
    +      this.lower = lower;
    +      this.upper = upper;
    +      this.type = RANGE_TYPE.valueOf(lower, upper);
    +    }
    +
    +    @Override
    +    public String validate(String value) {
    +      try {
    +        if (value == null) {
    +          return "Value cannot be null";
    +        }
    +        if (!type.inRange(value.trim(), lower, upper)) {
    +          return "Invalid value " + value + ", which should be in between " + lower + " and " + upper;
    +        }
    +      } catch (Exception e) {
    +        return e.toString();
    +      }
    +      return null;
    +    }
    +  }
    +
    +  static class PatternSet implements Validator {
    +
    +    private final List<Pattern> expected = new ArrayList<Pattern>();
    +
    +    public PatternSet(String... values) {
    +      for (String value : values) {
    +        expected.add(Pattern.compile(value));
    +      }
    +    }
    +
    +    @Override
    +    public String validate(String value) {
    +      if (value == null) {
    +        return "Invalid value.. expects one of patterns " + expected;
    +      }
    +      for (Pattern pattern : expected) {
    +        if (pattern.matcher(value).matches()) {
    +          return null;
    +        }
    +      }
    +      return "Invalid value.. expects one of patterns " + expected;
    +    }
    +  }
    +
    +  static class RatioValidator implements Validator {
    +
    +    @Override
    +    public String validate(String value) {
    +      try {
    +        float fvalue = Float.valueOf(value);
    +        if (fvalue <= 0 || fvalue >= 1) {
    +          return "Invalid ratio " + value + ", which should be in between 0 to 1";
    +        }
    +      } catch (NumberFormatException e) {
    +        return e.toString();
    +      }
    +      return null;
    +    }
    +  }
    +}
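
    The contract, per the interface comment above, is a null return for a valid
    value and a failure message otherwise. A minimal sketch of a caller (class
    names from this file; assumes hive-common on the classpath):

        import org.apache.hadoop.hive.conf.Validator;

        public class ValidatorDemo {
          public static void main(String[] args) {
            Validator onOff = new Validator.StringSet("on", "off");
            System.out.println(onOff.validate("ON"));    // null: comparison is case-insensitive
            System.out.println(onOff.validate("maybe")); // Invalid value.. expects one of [on, off]
            System.out.println(new Validator.RatioValidator().validate("0.75")); // null: strictly inside (0, 1)
          }
        }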

    Added: hive/trunk/common/src/java/org/apache/hive/common/util/SystemVariables.java
    URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hive/common/util/SystemVariables.java?rev=1569164&view=auto
    ==============================================================================
    --- hive/trunk/common/src/java/org/apache/hive/common/util/SystemVariables.java (added)
    +++ hive/trunk/common/src/java/org/apache/hive/common/util/SystemVariables.java Tue Feb 18 02:18:36 2014
    @@ -0,0 +1,83 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hive.common.util;
    +
    +import java.util.regex.Matcher;
    +import java.util.regex.Pattern;
    +
    +import org.apache.commons.logging.Log;
    +import org.apache.commons.logging.LogFactory;
    +import org.apache.hadoop.hive.conf.HiveConf;
    +import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    +
    +public class SystemVariables {
    +
    +  private static final Log l4j = LogFactory.getLog(SystemVariables.class);
    +  protected static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");
    +
    +  public static final String ENV_PREFIX = "env:";
    +  public static final String SYSTEM_PREFIX = "system:";
    +  public static final String HIVECONF_PREFIX = "hiveconf:";
    +  public static final String HIVEVAR_PREFIX = "hivevar:";
    +  public static final String SET_COLUMN_NAME = "set";
    +
    +  protected String getSubstitute(HiveConf conf, String var) {
    +    String val = null;
    +    try {
    +      if (var.startsWith(SYSTEM_PREFIX)) {
    +        val = System.getProperty(var.substring(SYSTEM_PREFIX.length()));
    +      }
    +    } catch(SecurityException se) {
    +      l4j.warn("Unexpected SecurityException in Configuration", se);
    +    }
    +    if (val == null) {
    +      if (var.startsWith(ENV_PREFIX)) {
    +        val = System.getenv(var.substring(ENV_PREFIX.length()));
    +      }
    +    }
    +    return val;
    +  }
    +
    +  public String substitute(HiveConf conf, String expr) {
    +    int depth = conf.getIntVar(ConfVars.HIVEVARIABLESUBSTITUTEDEPTH);
    +    return substitute(conf, expr, depth);
    +  }
    +
    +  public String substitute(HiveConf conf, String expr, int depth) {
    +    Matcher match = varPat.matcher("");
    +    String eval = expr;
    +    for (int s = 0; s < depth; s++) {
    +      match.reset(eval);
    +      if (!match.find()) {
    +        return eval;
    +      }
    +      String var = match.group();
    +      var = var.substring(2, var.length()-1); // remove ${ .. }
    +      String val = getSubstitute(conf, var);
    +
    +      if (val == null) {
    +        l4j.debug("Interpolation result: " + eval);
    +        return eval; // return literal, no substitution found
    +      }
    +      // substitute
    +      eval = eval.substring(0, match.start()) + val + eval.substring(match.end());
    +    }
    +    throw new IllegalStateException("Variable substitution depth too large: "
    +        + conf.getIntVar(ConfVars.HIVEVARIABLESUBSTITUTEDEPTH) + " " + expr);
    +  }
    +}
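
    A rough illustration of the substitution loop (a sketch; constructing a
    HiveConf assumes the usual Hive client classpath):

        import org.apache.hadoop.hive.conf.HiveConf;
        import org.apache.hive.common.util.SystemVariables;

        public class SubstituteDemo {
          public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            SystemVariables sv = new SystemVariables();
            // The base class resolves only the system: and env: namespaces;
            // anything it cannot resolve is returned unchanged.
            System.out.println(sv.substitute(conf, "user is ${system:user.name}"));
            System.out.println(sv.substitute(conf, "left as-is: ${hivevar:x}"));
          }
        }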

    Modified: hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java
    URL: http://svn.apache.org/viewvc/hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java (original)
    +++ hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java Tue Feb 18 02:18:36 2014
    @@ -18,7 +18,6 @@
      package org.apache.hadoop.hive.conf;

      import org.apache.hadoop.conf.Configuration;
    -import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
      import org.apache.hive.common.util.HiveTestUtils;
      import org.junit.Assert;
    @@ -43,7 +42,7 @@ public class TestHiveConf {
        }

        private void checkConfVar(ConfVars var, String expectedConfVarVal) throws Exception {
    -    Assert.assertEquals(expectedConfVarVal, var.defaultVal);
    +    Assert.assertEquals(expectedConfVarVal, var.getDefaultValue());
        }

        private void checkHiveConf(String name, String expectedHiveVal) throws Exception {
    @@ -80,7 +79,7 @@ public class TestHiveConf {
          checkHiveConf("test.property1", "hive-site.xml");

          // Test HiveConf property variable substitution in hive-site.xml
    -    checkHiveConf("test.var.hiveconf.property", ConfVars.DEFAULTPARTITIONNAME.defaultVal);
    +    checkHiveConf("test.var.hiveconf.property", ConfVars.DEFAULTPARTITIONNAME.getDefaultValue());
        }

        @Test

    Modified: hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java
    URL: http://svn.apache.org/viewvc/hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java (original)
    +++ hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java Tue Feb 18 02:18:36 2014
    @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.conf;

      import junit.framework.TestCase;

    -import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
      import org.junit.Test;


    Modified: hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java
    URL: http://svn.apache.org/viewvc/hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java (original)
    +++ hive/trunk/common/src/test/org/apache/hadoop/hive/conf/TestHiveLogging.java Tue Feb 18 02:18:36 2014
    @@ -18,7 +18,6 @@
      package org.apache.hadoop.hive.conf;

      import java.io.BufferedReader;
    -import java.io.IOException;
      import java.io.InputStreamReader;

      import junit.framework.TestCase;
    @@ -26,7 +25,6 @@ import junit.framework.TestCase;
      import org.apache.hadoop.hive.common.LogUtils;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
      import org.apache.hive.common.util.HiveTestUtils;
    -import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

      /**
       * TestHiveLogging
  • Brock at Feb 18, 2014 at 2:19 am
    Modified: hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java
    URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java (original)
    +++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hadoop/hive/jdbc/TestJdbcDriver.java Tue Feb 18 02:18:36 2014
    @@ -19,7 +19,7 @@
      package org.apache.hadoop.hive.jdbc;

      import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
    -import static org.apache.hadoop.hive.ql.processors.SetProcessor.SET_COLUMN_NAME;
    +import static org.apache.hive.common.util.SystemVariables.SET_COLUMN_NAME;

      import java.sql.Connection;
      import java.sql.DatabaseMetaData;

    Modified: hive/trunk/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    URL: http://svn.apache.org/viewvc/hive/trunk/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java (original)
    +++ hive/trunk/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java Tue Feb 18 02:18:36 2014
    @@ -19,7 +19,7 @@
      package org.apache.hive.jdbc;

      import static org.apache.hadoop.hive.ql.exec.ExplainTask.EXPL_COLUMN_NAME;
    -import static org.apache.hadoop.hive.ql.processors.SetProcessor.SET_COLUMN_NAME;
    +import static org.apache.hive.common.util.SystemVariables.SET_COLUMN_NAME;
      import static org.junit.Assert.assertEquals;
      import static org.junit.Assert.assertFalse;
      import static org.junit.Assert.assertNotNull;
    @@ -1855,7 +1855,7 @@ public class TestJdbcDriver2 {
         * @throws Exception
         */
        public void testFetchFirstSetCmds() throws Exception {
    -    execFetchFirst("set -v", SetProcessor.SET_COLUMN_NAME, false);
    +    execFetchFirst("set -v", SET_COLUMN_NAME, false);
        }

        /**

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java Tue Feb 18 02:18:36 2014
    @@ -138,6 +138,7 @@ import org.apache.hadoop.hive.ql.plan.Re
      import org.apache.hadoop.hive.ql.plan.RevokeDesc;
      import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
      import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
    +import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
      import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
      import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
      import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
    @@ -176,7 +177,6 @@ import org.apache.hadoop.hive.shims.Shim
      import org.apache.hadoop.io.IOUtils;
      import org.apache.hadoop.util.ReflectionUtils;
      import org.apache.hadoop.util.ToolRunner;
    -import org.apache.thrift.TException;
      import org.stringtemplate.v4.ST;

      /**
    @@ -399,6 +399,11 @@ public class DDLTask extends Task<DDLWor
              return showCreateTable(db, showCreateTbl);
            }

    +      ShowConfDesc showConf = work.getShowConfDesc();
    +      if (showConf != null) {
    +        return showConf(db, showConf);
    +      }
    +
            RoleDDLDesc roleDDLDesc = work.getRoleDDLDesc();
            if (roleDDLDesc != null) {
              return roleDDL(roleDDLDesc);
    @@ -461,6 +466,38 @@ public class DDLTask extends Task<DDLWor
          return 0;
        }

    +  private int showConf(Hive db, ShowConfDesc showConf) throws Exception {
    +    ConfVars conf = HiveConf.getConfVars(showConf.getConfName());
    +    if (conf == null) {
    +      throw new HiveException("invalid configuration name " + showConf.getConfName());
    +    }
    +    String description = conf.getDescription();
    +    String defaultValue = conf.getDefaultValue();
    +    DataOutputStream output = getOutputStream(showConf.getResFile());
    +    try {
    +      if (description != null) {
    +        if (defaultValue != null) {
    +          output.write(defaultValue.getBytes());
    +        }
    +        output.write(separator);
    +        output.write(conf.typeString().getBytes());
    +        output.write(separator);
    +        if (description != null) {
    +          output.write(description.replaceAll(" *\n *", " ").getBytes());
    +        }
    +        output.write(terminator);
    +      }
    +    } finally {
    +      output.close();
    +    }
    +    return 0;
    +  }
    +
    +  private DataOutputStream getOutputStream(Path outputFile) throws Exception {
    +    FileSystem fs = outputFile.getFileSystem(conf);
    +    return fs.create(outputFile);
    +  }
    +
        /**
         * First, make sure the source table/partition is not
         * archived/indexes/non-rcfile. If either of these is true, throw an

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java Tue Feb 18 02:18:36 2014
    @@ -72,7 +72,6 @@ import java.util.Set;
      import java.util.UUID;
      import java.util.zip.Deflater;
      import java.util.zip.DeflaterOutputStream;
    -import java.util.zip.Inflater;
      import java.util.zip.InflaterInputStream;
      import java.util.concurrent.ConcurrentHashMap;
      import java.util.concurrent.ExecutionException;

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcFile.java Tue Feb 18 02:18:36 2014
    @@ -27,6 +27,8 @@ import org.apache.hadoop.hive.conf.HiveC
      import org.apache.hadoop.hive.ql.io.orc.Reader.FileMetaInfo;
      import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;

    +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.*;
    +
      /**
       * Contains factory methods to read or write ORC files.
       */
    @@ -142,26 +144,12 @@ public final class OrcFile {
          WriterOptions(Configuration conf) {
            configuration = conf;
            memoryManagerValue = getMemoryManager(conf);
    -      stripeSizeValue =
    -          conf.getLong(HiveConf.ConfVars.HIVE_ORC_DEFAULT_STRIPE_SIZE.varname,
    -              HiveConf.ConfVars.HIVE_ORC_DEFAULT_STRIPE_SIZE.defaultLongVal);
    -      rowIndexStrideValue =
    -          conf.getInt(HiveConf.ConfVars.HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE
    -              .varname, HiveConf.ConfVars.HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE.defaultIntVal);
    -      bufferSizeValue =
    -          conf.getInt(HiveConf.ConfVars.HIVE_ORC_DEFAULT_BUFFER_SIZE.varname,
    -              HiveConf.ConfVars.HIVE_ORC_DEFAULT_BUFFER_SIZE.defaultIntVal);
    -      blockPaddingValue =
    -          conf.getBoolean(HiveConf.ConfVars.HIVE_ORC_DEFAULT_BLOCK_PADDING
    -              .varname, HiveConf.ConfVars.HIVE_ORC_DEFAULT_BLOCK_PADDING
    -              .defaultBoolVal);
    -      compressValue =
    -          CompressionKind.valueOf(conf.get(HiveConf.ConfVars
    -              .HIVE_ORC_DEFAULT_COMPRESS.varname,
    -              HiveConf.ConfVars
    -              .HIVE_ORC_DEFAULT_COMPRESS.defaultVal));
    -      String versionName =
    -          conf.get(HiveConf.ConfVars.HIVE_ORC_WRITE_FORMAT.varname);
    +      stripeSizeValue = HiveConf.getLongVar(conf, HIVE_ORC_DEFAULT_STRIPE_SIZE);
    +      rowIndexStrideValue = HiveConf.getIntVar(conf, HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE);
    +      bufferSizeValue = HiveConf.getIntVar(conf, HIVE_ORC_DEFAULT_BUFFER_SIZE);
    +      blockPaddingValue = HiveConf.getBoolVar(conf, HIVE_ORC_DEFAULT_BLOCK_PADDING);
    +      compressValue = CompressionKind.valueOf(HiveConf.getVar(conf, HIVE_ORC_DEFAULT_COMPRESS));
    +      String versionName = HiveConf.getVar(conf, HIVE_ORC_WRITE_FORMAT);
            if (versionName == null) {
              versionValue = Version.CURRENT;
            } else {

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java Tue Feb 18 02:18:36 2014
    @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.ql.securit
      import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
      import org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider;
      import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
    -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
      import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
      import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
      import org.apache.hadoop.io.Text;

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java Tue Feb 18 02:18:36 2014
    @@ -109,6 +109,7 @@ import org.apache.hadoop.hive.ql.plan.Pl
      import org.apache.hadoop.hive.ql.plan.RenamePartitionDesc;
      import org.apache.hadoop.hive.ql.plan.RoleDDLDesc;
      import org.apache.hadoop.hive.ql.plan.ShowColumnsDesc;
    +import org.apache.hadoop.hive.ql.plan.ShowConfDesc;
      import org.apache.hadoop.hive.ql.plan.ShowCreateTableDesc;
      import org.apache.hadoop.hive.ql.plan.ShowDatabasesDesc;
      import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;
    @@ -310,6 +311,10 @@ public class DDLSemanticAnalyzer extends
            ctx.setResFile(ctx.getLocalTmpPath());
            analyzeShowDbLocks(ast);
            break;
    +    case HiveParser.TOK_SHOWCONF:
    +      ctx.setResFile(ctx.getLocalTmpPath());
    +      analyzeShowConf(ast);
    +      break;
          case HiveParser.TOK_DESCFUNCTION:
            ctx.setResFile(ctx.getLocalTmpPath());
            analyzeDescFunction(ast);
    @@ -2190,6 +2195,14 @@ public class DDLSemanticAnalyzer extends
          ctx.setNeedLockMgr(true);
        }

    +  private void analyzeShowConf(ASTNode ast) throws SemanticException {
    +    String confName = stripQuotes(ast.getChild(0).getText());
    +    ShowConfDesc showConfDesc = new ShowConfDesc(ctx.getResFile(), confName);
    +    rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
    +        showConfDesc), conf));
    +    setFetchTask(createFetchTask(showConfDesc.getSchema()));
    +  }
    +
        /**
         * Add the task according to the parsed command tree. This is used for the CLI
         * command "LOCK TABLE ..;".

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g Tue Feb 18 02:18:36 2014
    @@ -285,6 +285,7 @@ KW_ROLES: 'ROLES';
      KW_INNER: 'INNER';
      KW_EXCHANGE: 'EXCHANGE';
      KW_ADMIN: 'ADMIN';
    +KW_CONF: 'CONF';

      // Operators
      // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work.

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g Tue Feb 18 02:18:36 2014
    @@ -163,6 +163,7 @@ TOK_SHOW_CREATETABLE;
      TOK_SHOW_TABLESTATUS;
      TOK_SHOW_TBLPROPERTIES;
      TOK_SHOWLOCKS;
    +TOK_SHOWCONF;
      TOK_LOCKTABLE;
      TOK_UNLOCKTABLE;
      TOK_LOCKDB;
    @@ -1290,6 +1291,8 @@ showStatement
        | KW_SHOW KW_LOCKS KW_DATABASE (dbName=Identifier) (isExtended=KW_EXTENDED)? -> ^(TOK_SHOWDBLOCKS $dbName $isExtended?)
        | KW_SHOW (showOptions=KW_FORMATTED)? (KW_INDEX|KW_INDEXES) KW_ON showStmtIdentifier ((KW_FROM|KW_IN) db_name=identifier)?
          -> ^(TOK_SHOWINDEXES showStmtIdentifier $showOptions? $db_name?)
    +    | KW_SHOW KW_CONF StringLiteral
    +    -> ^(TOK_SHOWCONF StringLiteral)
        ;

      lockStatement

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzerFactory.java Tue Feb 18 02:18:36 2014
    @@ -69,6 +69,7 @@ public final class SemanticAnalyzerFacto
          commandType.put(HiveParser.TOK_SHOWPARTITIONS, HiveOperation.SHOWPARTITIONS);
          commandType.put(HiveParser.TOK_SHOWLOCKS, HiveOperation.SHOWLOCKS);
          commandType.put(HiveParser.TOK_SHOWDBLOCKS, HiveOperation.SHOWLOCKS);
    +    commandType.put(HiveParser.TOK_SHOWCONF, HiveOperation.SHOWCONF);
          commandType.put(HiveParser.TOK_CREATEFUNCTION, HiveOperation.CREATEFUNCTION);
          commandType.put(HiveParser.TOK_DROPFUNCTION, HiveOperation.DROPFUNCTION);
          commandType.put(HiveParser.TOK_CREATEMACRO, HiveOperation.CREATEMACRO);
    @@ -193,6 +194,7 @@ public final class SemanticAnalyzerFacto
            case HiveParser.TOK_SHOWINDEXES:
            case HiveParser.TOK_SHOWLOCKS:
            case HiveParser.TOK_SHOWDBLOCKS:
    +      case HiveParser.TOK_SHOWCONF:
            case HiveParser.TOK_CREATEINDEX:
            case HiveParser.TOK_DROPINDEX:
            case HiveParser.TOK_ALTERTABLE_CLUSTER_SORT:

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/VariableSubstitution.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/VariableSubstitution.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/VariableSubstitution.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/parse/VariableSubstitution.java Tue Feb 18 02:18:36 2014
    @@ -17,79 +17,48 @@
       */
      package org.apache.hadoop.hive.ql.parse;

    -import java.util.regex.Matcher;
    -import java.util.regex.Pattern;
    -
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
    -import org.apache.hadoop.hive.ql.processors.SetProcessor;
      import org.apache.hadoop.hive.ql.session.SessionState;
    +import org.apache.hive.common.util.SystemVariables;
    +
    +import java.util.Map;

    -public class VariableSubstitution {
    +public class VariableSubstitution extends SystemVariables {

        private static final Log l4j = LogFactory.getLog(VariableSubstitution.class);
    -  protected static Pattern varPat = Pattern.compile("\\$\\{[^\\}\\$\u0020]+\\}");

    -  private String getSubstitute(HiveConf conf, String var) {
    -    String val = null;
    -    try {
    -      if (var.startsWith(SetProcessor.SYSTEM_PREFIX)) {
    -        val = System.getProperty(var.substring(SetProcessor.SYSTEM_PREFIX.length()));
    -      }
    -    } catch(SecurityException se) {
    -      l4j.warn("Unexpected SecurityException in Configuration", se);
    -    }
    -    if (val ==null){
    -      if (var.startsWith(SetProcessor.ENV_PREFIX)){
    -        val = System.getenv(var.substring(SetProcessor.ENV_PREFIX.length()));
    -      }
    -    }
    +  @Override
    +  protected String getSubstitute(HiveConf conf, String var) {
    +    String val = super.getSubstitute(conf, var);
         if (val == null) {
    -      if (var.startsWith(SetProcessor.HIVECONF_PREFIX)){
    -        val = conf.get(var.substring(SetProcessor.HIVECONF_PREFIX.length()));
    +      if (var.startsWith(HIVECONF_PREFIX)) {
    +        val = conf.get(var.substring(HIVECONF_PREFIX.length()));
           }
         }
    -    if (val ==null){
    -      if(var.startsWith(SetProcessor.HIVEVAR_PREFIX)){
    -        val = SessionState.get().getHiveVariables().get(var.substring(SetProcessor.HIVEVAR_PREFIX.length()));
    +    if (val == null){
    +      Map<String,String> vars = SessionState.get().getHiveVariables();
    +      if (var.startsWith(HIVEVAR_PREFIX)) {
    +        val = vars.get(var.substring(HIVEVAR_PREFIX.length()));
           } else {
    -        val = SessionState.get().getHiveVariables().get(var);
    +        val = vars.get(var);
           }
         }
         return val;
       }

    +  @Override
       public String substitute (HiveConf conf, String expr) {
    -
    -    if (conf.getBoolVar(ConfVars.HIVEVARIABLESUBSTITUTE)){
    -      l4j.debug("Substitution is on: "+expr);
    +    if (conf.getBoolVar(ConfVars.HIVEVARIABLESUBSTITUTE)) {
    +      l4j.debug("Substitution is on: " + expr);
         } else {
           return expr;
         }
         if (expr == null) {
           return null;
         }
    -    Matcher match = varPat.matcher("");
    -    String eval = expr;
    -    for(int s=0;s<conf.getIntVar(ConfVars.HIVEVARIABLESUBSTITUTEDEPTH); s++) {
    -      match.reset(eval);
    -      if (!match.find()) {
    -        return eval;
    -      }
    -      String var = match.group();
    -      var = var.substring(2, var.length()-1); // remove ${ .. }
    -      String val = getSubstitute(conf, var);
    -
    -      if (val == null) {
    -        l4j.debug("Interpolation result: "+eval);
    -        return eval; // return literal, no substitution found
    -      }
    -      // substitute
    -      eval = eval.substring(0, match.start())+val+eval.substring(match.end());
    -    }
    -    throw new IllegalStateException("Variable substitution depth too large: "
    -        + conf.getIntVar(ConfVars.HIVEVARIABLESUBSTITUTEDEPTH) + " " + expr);
    +    return super.substitute(conf, expr);
        }
      }

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java Tue Feb 18 02:18:36 2014
    @@ -74,6 +74,8 @@ public class DDLWork implements Serializ
        private RevokeDesc revokeDesc;
        private GrantRevokeRoleDDL grantRevokeRoleDDL;

    +  private ShowConfDesc showConfDesc;
    +
        boolean needLock = false;

        /**
    @@ -137,6 +139,12 @@ public class DDLWork implements Serializ
          this.truncateTblDesc = truncateTblDesc;
        }

    +  public DDLWork(HashSet<ReadEntity> inputs, HashSet<WriteEntity> outputs,
    +      ShowConfDesc showConfDesc) {
    +    this(inputs, outputs);
    +    this.showConfDesc = showConfDesc;
    +  }
    +
        public DescDatabaseDesc getDescDatabaseDesc() {
          return descDbDesc;
        }
    @@ -1085,4 +1093,12 @@ public class DDLWork implements Serializ
            AlterTableExchangePartition alterTableExchangePartition) {
          this.alterTableExchangePartition = alterTableExchangePartition;
        }
    +
    +  public ShowConfDesc getShowConfDesc() {
    +    return showConfDesc;
    +  }
    +
    +  public void setShowConfDesc(ShowConfDesc showConfDesc) {
    +    this.showConfDesc = showConfDesc;
    +  }
      }

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/HiveOperation.java Tue Feb 18 02:18:36 2014
    @@ -66,6 +66,7 @@ public enum HiveOperation {
        SHOWINDEXES("SHOWINDEXES", null, null),
        SHOWPARTITIONS("SHOWPARTITIONS", null, null),
        SHOWLOCKS("SHOWLOCKS", null, null),
    + SHOWCONF("SHOWCONF", null, null),
        CREATEFUNCTION("CREATEFUNCTION", null, null),
        DROPFUNCTION("DROPFUNCTION", null, null),
        CREATEMACRO("CREATEMACRO", null, null),

    Added: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java?rev=1569164&view=auto
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java (added)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/plan/ShowConfDesc.java Tue Feb 18 02:18:36 2014
    @@ -0,0 +1,61 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.plan;
    +
    +import org.apache.hadoop.fs.Path;
    +
    +import java.io.Serializable;
    +
    +public class ShowConfDesc extends DDLDesc implements Serializable {
    +  private static final long serialVersionUID = 1L;
    +
    +  private Path resFile;
    +  private String confName;
    +
    +  private static final String schema = "default,type,desc#string,string,string";
    +
    +  public String getSchema() {
    +    return schema;
    +  }
    +
    +  public ShowConfDesc() {
    +  }
    +
    +  public ShowConfDesc(Path resFile, String confName) {
    +    this.resFile = resFile;
    +    this.confName = confName;
    +  }
    +
    +  @Explain(displayName = "result file", normalExplain = false)
    +  public Path getResFile() {
    +    return resFile;
    +  }
    +
    +  public void setResFile(Path resFile) {
    +    this.resFile = resFile;
    +  }
    +
    +  @Explain(displayName = "conf name", normalExplain = false)
    +  public String getConfName() {
    +    return confName;
    +  }
    +
    +  public void setConfName(String confName) {
    +    this.confName = confName;
    +  }
    +}

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java Tue Feb 18 02:18:36 2014
    @@ -22,6 +22,8 @@ import static org.apache.hadoop.hive.ser
      import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME;
      import static org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe.defaultNullString;

    +import static org.apache.hive.common.util.SystemVariables.*;
    +
      import java.util.Map;
      import java.util.Properties;
      import java.util.SortedMap;
    @@ -39,12 +41,7 @@ import org.apache.hadoop.hive.ql.session
       */
      public class SetProcessor implements CommandProcessor {

    -  private static String prefix = "set: ";
    -  public static final String ENV_PREFIX = "env:";
    -  public static final String SYSTEM_PREFIX = "system:";
    -  public static final String HIVECONF_PREFIX = "hiveconf:";
    -  public static final String HIVEVAR_PREFIX = "hivevar:";
    -  public static final String SET_COLUMN_NAME = "set";
    +  private static final String prefix = "set: ";

        public static boolean getBoolean(String value) {
          if (value.equals("on") || value.equals("true")) {
    @@ -69,7 +66,7 @@ public class SetProcessor implements Com

          // Inserting hive variables
          for (String s : ss.getHiveVariables().keySet()) {
    -      sortedMap.put(SetProcessor.HIVEVAR_PREFIX + s, ss.getHiveVariables().get(s));
    +      sortedMap.put(HIVEVAR_PREFIX + s, ss.getHiveVariables().get(s));
          }

          for (Map.Entry<String, String> entries : sortedMap.entrySet()) {
    @@ -108,23 +105,23 @@ public class SetProcessor implements Com
          if (varvalue.contains("\n")){
            ss.err.println("Warning: Value had a \\n character in it.");
          }
    -    if (varname.startsWith(SetProcessor.ENV_PREFIX)){
    +    if (varname.startsWith(ENV_PREFIX)){
           ss.err.println("env:* variables can not be set.");
           return new CommandProcessorResponse(1);
    -    } else if (varname.startsWith(SetProcessor.SYSTEM_PREFIX)){
    -      String propName = varname.substring(SetProcessor.SYSTEM_PREFIX.length());
    +    } else if (varname.startsWith(SYSTEM_PREFIX)){
    +      String propName = varname.substring(SYSTEM_PREFIX.length());
           System.getProperties().setProperty(propName, new VariableSubstitution().substitute(ss.getConf(),varvalue));
           return new CommandProcessorResponse(0);
    -    } else if (varname.startsWith(SetProcessor.HIVECONF_PREFIX)){
    -      String propName = varname.substring(SetProcessor.HIVECONF_PREFIX.length());
    +    } else if (varname.startsWith(HIVECONF_PREFIX)){
    +      String propName = varname.substring(HIVECONF_PREFIX.length());
           try {
             setConf(varname, propName, varvalue, false);
             return new CommandProcessorResponse(0);
           } catch (IllegalArgumentException e) {
             return new CommandProcessorResponse(1, e.getMessage(), "42000");
           }
    -    } else if (varname.startsWith(SetProcessor.HIVEVAR_PREFIX)) {
    -      String propName = varname.substring(SetProcessor.HIVEVAR_PREFIX.length());
    +    } else if (varname.startsWith(HIVEVAR_PREFIX)) {
    +      String propName = varname.substring(HIVEVAR_PREFIX.length());
           ss.getHiveVariables().put(propName, new VariableSubstitution().substitute(ss.getConf(),varvalue));
           return new CommandProcessorResponse(0);
         } else {
    @@ -169,7 +166,7 @@ public class SetProcessor implements Com

        private SortedMap<String,String> propertiesToSortedMap(Properties p){
          SortedMap<String,String> sortedPropMap = new TreeMap<String,String>();
    -    for (Map.Entry<Object, Object> entry :System.getProperties().entrySet() ){
    +    for (Map.Entry<Object, Object> entry : p.entrySet() ){
            sortedPropMap.put( (String) entry.getKey(), (String) entry.getValue());
          }
          return sortedPropMap;
    @@ -188,38 +185,38 @@ public class SetProcessor implements Com
            ss.out.println("silent" + "=" + ss.getIsSilent());
            return createProcessorSuccessResponse();
          }
    -    if (varname.startsWith(SetProcessor.SYSTEM_PREFIX)){
    -      String propName = varname.substring(SetProcessor.SYSTEM_PREFIX.length());
    +    if (varname.startsWith(SYSTEM_PREFIX)) {
    +      String propName = varname.substring(SYSTEM_PREFIX.length());
           String result = System.getProperty(propName);
    -      if (result != null){
    -        ss.out.println(SetProcessor.SYSTEM_PREFIX+propName + "=" + result);
    +      if (result != null) {
    +        ss.out.println(SYSTEM_PREFIX + propName + "=" + result);
             return createProcessorSuccessResponse();
           } else {
    -        ss.out.println( propName + " is undefined as a system property");
    +        ss.out.println(propName + " is undefined as a system property");
             return new CommandProcessorResponse(1);
           }
    -    } else if (varname.indexOf(SetProcessor.ENV_PREFIX)==0){
    +    } else if (varname.indexOf(ENV_PREFIX) == 0) {
           String var = varname.substring(ENV_PREFIX.length());
    -      if (System.getenv(var)!=null){
    -        ss.out.println(SetProcessor.ENV_PREFIX+var + "=" + System.getenv(var));
    +      if (System.getenv(var) != null) {
    +        ss.out.println(ENV_PREFIX + var + "=" + System.getenv(var));
             return createProcessorSuccessResponse();
           } else {
             ss.out.println(varname + " is undefined as an environmental variable");
             return new CommandProcessorResponse(1);
           }
    -    } else if (varname.indexOf(SetProcessor.HIVECONF_PREFIX)==0) {
    -      String var = varname.substring(SetProcessor.HIVECONF_PREFIX.length());
    -      if (ss.getConf().get(var)!=null){
    -        ss.out.println(SetProcessor.HIVECONF_PREFIX+var + "=" + ss.getConf().get(var));
    +    } else if (varname.indexOf(HIVECONF_PREFIX) == 0) {
    +      String var = varname.substring(HIVECONF_PREFIX.length());
    +      if (ss.getConf().get(var) != null) {
    +        ss.out.println(HIVECONF_PREFIX + var + "=" + ss.getConf().get(var));
             return createProcessorSuccessResponse();
           } else {
             ss.out.println(varname + " is undefined as a hive configuration variable");
             return new CommandProcessorResponse(1);
           }
    -    } else if (varname.indexOf(SetProcessor.HIVEVAR_PREFIX)==0) {
    -      String var = varname.substring(SetProcessor.HIVEVAR_PREFIX.length());
    -      if (ss.getHiveVariables().get(var)!=null){
    -        ss.out.println(SetProcessor.HIVEVAR_PREFIX+var + "=" + ss.getHiveVariables().get(var));
    +    } else if (varname.indexOf(HIVEVAR_PREFIX) == 0) {
    +      String var = varname.substring(HIVEVAR_PREFIX.length());
    +      if (ss.getHiveVariables().get(var) != null) {
    +        ss.out.println(HIVEVAR_PREFIX + var + "=" + ss.getHiveVariables().get(var));
             return createProcessorSuccessResponse();
           } else {
             ss.out.println(varname + " is undefined as a hive variable");

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/HiveOperationType.java Tue Feb 18 02:18:36 2014
    @@ -69,6 +69,7 @@ public enum HiveOperationType {
        SHOWINDEXES,
        SHOWPARTITIONS,
        SHOWLOCKS,
    +  SHOWCONF,
        CREATEFUNCTION,
        DROPFUNCTION,
        CREATEMACRO,

    Modified: hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java (original)
    +++ hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/Operation2Privilege.java Tue Feb 18 02:18:36 2014
    @@ -160,6 +160,7 @@ public class Operation2Privilege {
          op2Priv.put(HiveOperationType.SHOWINDEXES, new InOutPrivs(null, null));
          op2Priv.put(HiveOperationType.SHOWPARTITIONS, new InOutPrivs(null, null));
          op2Priv.put(HiveOperationType.SHOWLOCKS, new InOutPrivs(null, null));
    + op2Priv.put(HiveOperationType.SHOWCONF, new InOutPrivs(null, null));
          op2Priv.put(HiveOperationType.CREATEFUNCTION, new InOutPrivs(null, null));
          op2Priv.put(HiveOperationType.DROPFUNCTION, new InOutPrivs(null, null));
          op2Priv.put(HiveOperationType.CREATEMACRO, new InOutPrivs(null, null));

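    Registering the new HiveOperationType constant in Operation2Privilege with new InOutPrivs(null, null) marks SHOW CONF as requiring no input or output privileges under SQL standard authorization, the same treatment the other read-only SHOW commands receive above. A toy model of what such a lookup amounts to (simplified stand-ins, not Hive's actual classes):

    import java.util.EnumMap;
    import java.util.Map;

    // Toy model of the op -> required-privilege table; not Hive's real types.
    public class PrivTableSketch {
      enum Op { SHOWLOCKS, SHOWCONF }

      // null on a side means "no privilege required" there.
      static class InOutPrivs {
        final String[] in, out;
        InOutPrivs(String[] in, String[] out) { this.in = in; this.out = out; }
      }

      static final Map<Op, InOutPrivs> OP2PRIV = new EnumMap<Op, InOutPrivs>(Op.class);
      static {
        OP2PRIV.put(Op.SHOWLOCKS, new InOutPrivs(null, null));
        OP2PRIV.put(Op.SHOWCONF, new InOutPrivs(null, null)); // read-only, unrestricted
      }

      public static void main(String[] args) {
        InOutPrivs p = OP2PRIV.get(Op.SHOWCONF);
        System.out.println("requires input privs: " + (p.in != null));
      }
    }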
    Added: hive/trunk/ql/src/test/queries/clientpositive/show_conf.q
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/queries/clientpositive/show_conf.q?rev=1569164&view=auto
    ==============================================================================
    --- hive/trunk/ql/src/test/queries/clientpositive/show_conf.q (added)
    +++ hive/trunk/ql/src/test/queries/clientpositive/show_conf.q Tue Feb 18 02:18:36 2014
    @@ -0,0 +1,3 @@
    +show conf "hive.auto.convert.sortmerge.join.to.mapjoin";
    +
    +show conf "hive.stats.retries.wait";

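    The commit also touches TestJdbcDriver/TestJdbcDriver2, so the new statement should be reachable over JDBC as well. A minimal sketch, assuming a HiveServer2 at the default local address (the connection URL and the three-column result shape are assumptions; the expected columns match the .q.out below):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    // Sketch: run SHOW CONF through the Hive JDBC driver. The connection URL
    // (local HiveServer2, default port, no auth) is an assumption.
    public class ShowConfJdbcSketch {
      public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        try (Connection con = DriverManager.getConnection("jdbc:hive2://localhost:10000/default");
             Statement stmt = con.createStatement();
             ResultSet rs = stmt.executeQuery("show conf \"hive.stats.retries.wait\"")) {
          while (rs.next()) {
            // Assumed three-column shape: default value, type, description.
            System.out.println(rs.getString(1) + "\t" + rs.getString(2) + "\t" + rs.getString(3));
          }
        }
      }
    }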
    Added: hive/trunk/ql/src/test/results/clientpositive/show_conf.q.out
    URL: http://svn.apache.org/viewvc/hive/trunk/ql/src/test/results/clientpositive/show_conf.q.out?rev=1569164&view=auto
    ==============================================================================
    --- hive/trunk/ql/src/test/results/clientpositive/show_conf.q.out (added)
    +++ hive/trunk/ql/src/test/results/clientpositive/show_conf.q.out Tue Feb 18 02:18:36 2014
    @@ -0,0 +1,10 @@
    +PREHOOK: query: show conf "hive.auto.convert.sortmerge.join.to.mapjoin"
    +PREHOOK: type: SHOWCONF
    +POSTHOOK: query: show conf "hive.auto.convert.sortmerge.join.to.mapjoin"
    +POSTHOOK: type: SHOWCONF
    +false BOOLEAN If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, this parameter decides whether each table should be tried as a big table, and effectively a map-join should be tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster if the complete small table can fit in memory, and a map-join can be performed.
    +PREHOOK: query: show conf "hive.stats.retries.wait"
    +PREHOOK: type: SHOWCONF
    +POSTHOOK: query: show conf "hive.stats.retries.wait"
    +POSTHOOK: type: SHOWCONF
    +3000 INT The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated by baseWindow * failures + baseWindow * (failures + 1) * (random number between [0.0,1.0]).
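    Reading the retry description above as wait = baseWindow * failures + baseWindow * (failures + 1) * random, the window grows roughly linearly with a jittered term on top. A small sketch of that arithmetic (my reading of the description, not Hive's actual retry code):

    import java.util.Random;

    // Sketch of the stats-retry wait formula as described above:
    //   wait = baseWindow * failures + baseWindow * (failures + 1) * rand[0,1)
    public class StatsRetryWaitSketch {
      public static void main(String[] args) {
        long baseWindow = 3000; // hive.stats.retries.wait default, in milliseconds
        Random rnd = new Random();
        for (int failures = 1; failures <= 3; failures++) {
          double wait = baseWindow * failures + baseWindow * (failures + 1) * rnd.nextDouble();
          System.out.printf("after %d failure(s): wait ~%.0f ms%n", failures, wait);
        }
      }
    }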
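    The HiveConf rewrite below is mechanical but large: every ConfVars constant moves from the old (name, default) form to (name, default, description), with an optional trailing boolean on some constants (e.g. HIVE_IN_TEST and the shim-backed Hadoop properties), which appears to exclude them from the generated hive-default.xml.template. A rough sketch of the constructor overloads implied by those call sites (inferred from the diff, not copied from HiveConf.java):

    // Sketch of the ConfVars constructor shape implied by the call sites below;
    // inferred for illustration, not the actual HiveConf source.
    public enum ConfVarsSketch {
      EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
      HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true);

      final String varname;
      final Object defaultVal;
      final String description;
      final boolean excluded; // assumed meaning: left out of the generated template

      ConfVarsSketch(String varname, Object defaultVal, String description) {
        this(varname, defaultVal, description, false);
      }

      ConfVarsSketch(String varname, Object defaultVal, String description, boolean excluded) {
        this.varname = varname;
        this.defaultVal = defaultVal;
        this.description = description;
        this.excluded = excluded;
      }
    }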
    Modified: hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    URL: http://svn.apache.org/viewvc/hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java (original)
    +++ hive/trunk/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java Tue Feb 18 02:18:36 2014
    @@ -27,17 +27,16 @@ import java.net.URL;
      import java.util.ArrayList;
      import java.util.HashMap;
      import java.util.Iterator;
    -import java.util.LinkedHashSet;
      import java.util.List;
      import java.util.Map;
      import java.util.Map.Entry;
      import java.util.Properties;
    -import java.util.Set;
      import java.util.regex.Matcher;
      import java.util.regex.Pattern;

      import javax.security.auth.login.LoginException;

    +import static org.apache.hadoop.hive.conf.Validator.*;
      import org.apache.commons.lang.StringUtils;
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
    @@ -166,839 +165,1378 @@ public class HiveConf extends Configurat
         */
        public static enum ConfVars {
          // QL execution stuff
    - SCRIPTWRAPPER("hive.exec.script.wrapper", null),
    - PLAN("hive.exec.plan", ""),
    - PLAN_SERIALIZATION("hive.plan.serialization.format","kryo"),
    - SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive-" + System.getProperty("user.name")),
    - LOCALSCRATCHDIR("hive.exec.local.scratchdir", System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name")),
    - SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700"),
    - SUBMITVIACHILD("hive.exec.submitviachild", false),
    - SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000),
    - ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false),
    - STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:"),
    - STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true),
    - COMPRESSRESULT("hive.exec.compress.output", false),
    - COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false),
    - COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", ""),
    - COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", ""),
    - BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (1000 * 1000 * 1000)),
    - MAXREDUCERS("hive.exec.reducers.max", 999),
    - PREEXECHOOKS("hive.exec.pre.hooks", ""),
    - POSTEXECHOOKS("hive.exec.post.hooks", ""),
    - ONFAILUREHOOKS("hive.exec.failure.hooks", ""),
    - CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", ""),
    - EXECPARALLEL("hive.exec.parallel", false), // parallel query launching
    - EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8),
    - HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true),
    - HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L),
    - DYNAMICPARTITIONING("hive.exec.dynamic.partition", true),
    - DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict"),
    - DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000),
    - DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100),
    - MAXCREATEDFILES("hive.exec.max.created.files", 100000L),
    + SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
    + PLAN("hive.exec.plan", "", ""),
    + PLAN_SERIALIZATION("hive.plan.serialization.format", "kryo",
    + "Query plan format serialization between client and task nodes. \n" +
    + "Two supported values are : kryo and javaXML. Kryo is default."),
    + SCRATCHDIR("hive.exec.scratchdir",
    + "/tmp/hive-" + System.getProperty("user.name"),
    + "Scratch space for Hive jobs"),
    + LOCALSCRATCHDIR("hive.exec.local.scratchdir",
    + System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name"),
    + "Local scratch space for Hive jobs"),
    + SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700", ""),
    + SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
    + SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
    + "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
    + "This prevents runaway scripts from filling logs partitions to capacity"),
    + ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
    + "When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input."),
    + STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
    + "Streaming jobs that log to standard error with this prefix can log counter or status information."),
    + STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
    + "Enable consumption of status and counter messages for streaming jobs."),
    + COMPRESSRESULT("hive.exec.compress.output", false,
    + "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" +
    + "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
    + COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
    + "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
    + "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
    + COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
    + COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
    + BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (1000 * 1000 * 1000),
    + "size per reducer.The default is 1G, i.e if the input size is 10G, it will use 10 reducers."),
    + MAXREDUCERS("hive.exec.reducers.max", 999,
    + "max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
    + "negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."),
    + PREEXECHOOKS("hive.exec.pre.hooks", "",
    + "Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
    + "A pre-execution hook is specified as the name of a Java class which implements the \n" +
    + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
    + POSTEXECHOOKS("hive.exec.post.hooks", "",
    + "Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
    + "A post-execution hook is specified as the name of a Java class which implements the \n" +
    + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
    + ONFAILUREHOOKS("hive.exec.failure.hooks", "",
    + "Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
    + "An on-failure hook is specified as the name of Java class which implements the \n" +
    + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
    + CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
    + "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
    + "A client stats publisher is specified as the name of a Java class which implements the \n" +
    + "org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
    + EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
    + EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
    + "How many jobs at most can be executed in parallel"),
    + HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
    + "Whether speculative execution for reducers should be turned on. "),
    + HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
    + "The interval with which to poll the JobTracker for the counters the running job. \n" +
    + "The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."),
    + DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
    + "Whether or not to allow dynamic partitions in DML/DDL."),
    + DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
    + "In strict mode, the user must specify at least one static partition in case the user accidentally overwrites all partitions."),
    + DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
    + "Maximum number of dynamic partitions allowed to be created in total."),
    + DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
    + "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
    + MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
    + "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
          DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
    - System.getProperty("java.io.tmpdir") + File.separator + "${hive.session.id}_resources"),
    - DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__"),
    - DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__"),
    + System.getProperty("java.io.tmpdir") + File.separator + "${hive.session.id}_resources",
    + "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
    + "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
    + "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."),
    + DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__", ""),
    + DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
          // Whether to show a link to the most failed task + debugging tips
    - SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true),
    - JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true),
    - JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000),
    - TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000),
    - OUTPUT_FILE_EXTENSION("hive.output.file.extension", null),
    -
    - HIVE_IN_TEST("hive.in.test", false), // internal usage only, true in test mode
    -
    - // should hive determine whether to run in local mode automatically ?
    - LOCALMODEAUTO("hive.exec.mode.local.auto", false),
    - // if yes:
    - // run in local mode only if input bytes is less than this. 128MB by default
    - LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L),
    - // run in local mode only if number of tasks (for map and reduce each) is
    - // less than this
    - LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4),
    - // if true, DROP TABLE/VIEW does not fail if table/view doesn't exist and IF EXISTS is
    - // not specified
    - DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true),

    - // ignore the mapjoin hint
    - HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true),
    + SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
    + "If a job fails, whether to provide a link in the CLI to the task with the\n" +
    + "most failures, along with debugging hints if applicable."),
    + JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
    + "Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
    + "for each failed job should be stored in the SessionState"),
    + JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
    + TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
    + OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
    + "String used as a file extension for output files. If not set, defaults to the codec extension for text files (e.g. \".gz\"), or no extension otherwise."),
    +
    + HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
    +
    + LOCALMODEAUTO("hive.exec.mode.local.auto", false,
    + "Let Hive determine whether to run in local mode automatically"),
    + LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
    + "When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."),
    + LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
    + "When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."),

    - // Max number of lines of footer user can set for a table file.
    - HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100),
    + DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
    + "Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view"),
    +
    + HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
    +
    + HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
    + "maximum number of lines for footer user can define for a table file"),

          // Hadoop Configuration Properties
          // Properties with null values are ignored and exist only for the purpose of giving us
          // a symbolic name to reference in the Hive source code. Properties with non-null
          // values will override any values set in the underlying Hadoop configuration.
    - HADOOPBIN("hadoop.bin.path", findHadoopBinary()),
    - HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem"),
    - HADOOPFS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS"), null),
    - HADOOPMAPFILENAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPFILENAME"), null),
    - HADOOPMAPREDINPUTDIR(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIR"), null),
    - HADOOPMAPREDINPUTDIRRECURSIVE(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIRRECURSIVE"), false),
    - MAPREDMAXSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 256000000L),
    - MAPREDMINSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 1L),
    - MAPREDMINSPLITSIZEPERNODE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 1L),
    - MAPREDMINSPLITSIZEPERRACK(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 1L),
    + HADOOPBIN("hadoop.bin.path", findHadoopBinary(), ""),
    + HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
    + "The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
    + HADOOPFS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPFS"), null, "", true),
    + HADOOPMAPFILENAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPFILENAME"), null, "", true),
    + HADOOPMAPREDINPUTDIR(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIR"), null, "", true),
    + HADOOPMAPREDINPUTDIRRECURSIVE(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPMAPREDINPUTDIRRECURSIVE"), false, "", true),
    + MAPREDMAXSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMAXSPLITSIZE"), 256000000L, "", true),
    + MAPREDMINSPLITSIZE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZE"), 1L, "", true),
    + MAPREDMINSPLITSIZEPERNODE(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERNODE"), 1L, "", true),
    + MAPREDMINSPLITSIZEPERRACK(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDMINSPLITSIZEPERRACK"), 1L, "", true),
          // The number of reduce tasks per job. Hadoop sets this value to 1 by default
          // By setting this property to -1, Hive will automatically determine the correct
          // number of reducers.
    - HADOOPNUMREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPNUMREDUCERS"), -1),
    - HADOOPJOBNAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPJOBNAME"), null),
    - HADOOPSPECULATIVEEXECREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSPECULATIVEEXECREDUCERS"), true),
    - MAPREDSETUPCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false),
    - MAPREDTASKCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false),
    + HADOOPNUMREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPNUMREDUCERS"), -1, "", true),
    + HADOOPJOBNAME(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPJOBNAME"), null, "", true),
    + HADOOPSPECULATIVEEXECREDUCERS(ShimLoader.getHadoopShims().getHadoopConfNames().get("HADOOPSPECULATIVEEXECREDUCERS"), true, "", true),
    + MAPREDSETUPCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDSETUPCLEANUPNEEDED"), false, "", true),
    + MAPREDTASKCLEANUPNEEDED(ShimLoader.getHadoopShims().getHadoopConfNames().get("MAPREDTASKCLEANUPNEEDED"), false, "", true),

          // Metastore stuff. Be sure to update HiveConf.metaVars when you add
          // something here!
    - METASTOREDIRECTORY("hive.metastore.metadb.dir", ""),
    - METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse"),
    - METASTOREURIS("hive.metastore.uris", ""),
    - // Number of times to retry a connection to a Thrift metastore server
    - METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3),
    - // Number of times to retry a Thrift metastore call upon failure
    - METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1),
    -
    - // Number of seconds the client should wait between connection attempts
    - METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", 1),
    - // Socket timeout for the client connection (in seconds)
    - METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", 20),
    - METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine"),
    - // Class name of JDO connection url hook
    - METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", ""),
    - METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true),
    - // Name of the connection url in the configuration
    + METASTOREDIRECTORY("hive.metastore.metadb.dir", "", ""),
    + METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
    + "location of default database for the warehouse"),
    + METASTOREURIS("hive.metastore.uris", "",
    + "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
    +
    + METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
    + "Number of retries while opening a connection to metastore"),
    + METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
    + "Number of retries upon failure of Thrift metastore calls"),
    +
    + METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", 1,
    + "Number of seconds for the client to wait between consecutive connection attempts"),
    + METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", 20,
    + "MetaStore Client socket timeout in seconds"),
    + METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
    + "password to use against metastore database"),
    + METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
    + "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
    + METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
    + "Set this to true if multiple threads access metastore through JDO concurrently."),
          METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
    - "jdbc:derby:;databaseName=metastore_db;create=true"),
    - // Whether to force reloading of the metastore configuration (including
    - // the connection URL, before the next metastore query that accesses the
    - // datastore. Once reloaded, this value is reset to false. Used for
    - // testing only.
    - METASTOREFORCERELOADCONF("hive.metastore.force.reload.conf", false),
    - // Number of attempts to retry connecting after there is a JDO datastore err
    - HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 1),
    - // Number of miliseconds to wait between attepting
    - HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000),
    - // Whether to force reloading of the HMSHandler configuration (including
    - // the connection URL, before the next metastore query that accesses the
    - // datastore. Once reloaded, this value is reset to false. Used for
    - // testing only.
    - HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false),
    - METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200),
    - METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 100000),
    - METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true),
    - // Intermediate dir suffixes used for archiving. Not important what they
    - // are, as long as collisions are avoided
    + "jdbc:derby:;databaseName=metastore_db;create=true",
    + "JDBC connect string for a JDBC metastore"),
    +
    + METASTOREFORCERELOADCONF("hive.metastore.force.reload.conf", false,
    + "Whether to force reloading of the metastore configuration (including\n" +
    + "the connection URL, before the next metastore query that accesses the\n" +
    + "datastore. Once reloaded, this value is reset to false. Used for\n" +
    + "testing only.\n"),
    + HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 1,
    + "The number of times to retry a HMSHandler call if there were a connection error"),
    + HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", 1000,
    + "The number of milliseconds between HMSHandler retry attempts"),
    + HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
    + "Whether to force reloading of the HMSHandler configuration (including\n" +
    + "the connection URL, before the next metastore query that accesses the\n" +
    + "datastore. Once reloaded, this value is reset to false. Used for\n" +
    + "testing only.\n"),
    + METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
    + "Minimum number of worker threads in the Thrift server's pool."),
    + METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 100000,
    + "Maximum number of worker threads in the Thrift server's pool."),
    + METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
    + "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
    +
          METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
    - "_INTERMEDIATE_ORIGINAL"),
    + "_INTERMEDIATE_ORIGINAL",
    + "Intermediate dir suffixes used for archiving. Not important what they\n" +
    + "are, as long as collisions are avoided\n"),
          METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
    - "_INTERMEDIATE_ARCHIVED"),
    + "_INTERMEDIATE_ARCHIVED", ""),
          METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
    - "_INTERMEDIATE_EXTRACTED"),
    - METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", ""),
    + "_INTERMEDIATE_EXTRACTED", ""),
    + METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
    + "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
          METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
    - "hive-metastore/_HOST@EXAMPLE.COM"),
    - METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false),
    - METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false),
    - METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS(
    - "hive.cluster.delegation.token.store.class",
    - "org.apache.hadoop.hive.thrift.MemoryTokenStore"),
    + "hive-metastore/_HOST@EXAMPLE.COM",
    + "The service principal for the metastore Thrift server. The special string _HOST will be replaced automatically with the correct host name."),
    + METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
    + "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
    + METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
    + "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
    + METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
    + "org.apache.hadoop.hive.thrift.MemoryTokenStore",
    + "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
          METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
    - "hive.cluster.delegation.token.store.zookeeper.connectString", ""),
    + "hive.cluster.delegation.token.store.zookeeper.connectString", "",
    + "The ZooKeeper token store connect string."),
          METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
    - "hive.cluster.delegation.token.store.zookeeper.znode", "/hive/cluster/delegation"),
    + "hive.cluster.delegation.token.store.zookeeper.znode", "/hive/cluster/delegation",
    + "The root path for token store data."),
          METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
    - "hive.cluster.delegation.token.store.zookeeper.acl", ""),
    - METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order"),
    - METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP"),
    - METASTORE_VALIDATE_TABLES("datanucleus.validateTables", false),
    - METASTORE_VALIDATE_COLUMNS("datanucleus.validateColumns", false),
    - METASTORE_VALIDATE_CONSTRAINTS("datanucleus.validateConstraints", false),
    - METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms"),
    - METASTORE_AUTO_CREATE_SCHEMA("datanucleus.autoCreateSchema", true),
    - METASTORE_FIXED_DATASTORE("datanucleus.fixedDatastore", false),
    - METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", false),
    - METASTORE_AUTO_START_MECHANISM_MODE("datanucleus.autoStartMechanismMode", "checked"),
    - METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed"),
    - METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false),
    - METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none"),
    - METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1"),
    - METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true),
    - METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG"),
    - METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300),
    + "hive.cluster.delegation.token.store.zookeeper.acl", "",
    + "ACL for token store entries. List comma separated all server principals for the cluster."),
    + METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
    + "List of comma separated metastore object types that should be pinned in the cache"),
    + METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP",
    + "Specify connection pool library for datanucleus"),
    + METASTORE_VALIDATE_TABLES("datanucleus.validateTables", false,
    + "validates existing schema against code. turn this on if you want to verify existing schema"),
    + METASTORE_VALIDATE_COLUMNS("datanucleus.validateColumns", false,
    + "validates existing schema against code. turn this on if you want to verify existing schema"),
    + METASTORE_VALIDATE_CONSTRAINTS("datanucleus.validateConstraints", false,
    + "validates existing schema against code. turn this on if you want to verify existing schema"),
    + METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
    + METASTORE_AUTO_CREATE_SCHEMA("datanucleus.autoCreateSchema", true,
    + "creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once"),
    + METASTORE_FIXED_DATASTORE("datanucleus.fixedDatastore", false, ""),
    + METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", false,
    + "Enforce metastore schema version consistency.\n" +
    + "True: Verify that version information stored in metastore matches with one from Hive jars. Also disable automatic\n" +
    + " schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
    + " proper metastore schema migration. (Default)\n" +
    + "False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."),
    + METASTORE_AUTO_START_MECHANISM_MODE("datanucleus.autoStartMechanismMode", "checked",
    + "throw exception if metadata tables are incorrect"),
    + METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
    + "Default transaction isolation level for identity generation."),
    + METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
    + "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
    + METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
    + METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
    + "Name of the identifier factory to use when generating table/column names etc. \n" +
    + "'datanucleus1' is used for backward compatibility with DataNucleus v1"),
    + METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
    + METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
    + "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
    + METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
    + "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" +
    + "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" +
    + "but it may also cause higher memory requirement at the client side."),
          METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX(
    - "hive.metastore.batch.retrieve.table.partition.max", 1000),
    - // A comma separated list of hooks which implement MetaStoreInitListener and will be run at
    - // the beginning of HMSHandler initialization
    - METASTORE_INIT_HOOKS("hive.metastore.init.hooks", ""),
    - METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", ""),
    - METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", ""),
    - // should we do checks against the storage (usually hdfs) for operations like drop_partition
    - METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false),
    - METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq",0L),
    - METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration",0L),
    - METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", false),
    - METASTORE_PARTITION_NAME_WHITELIST_PATTERN(
    - "hive.metastore.partition.name.whitelist.pattern", ""),
    - // Whether to enable integral JDO pushdown. For partition columns storing integers
    - // in non-canonical form, (e.g. '012'), it may not work, so it's off by default.
    - METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false),
    - METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true),
    - METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true),
    - METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
    - "hive.metastore.disallow.incompatible.col.type.changes", false),
    + "hive.metastore.batch.retrieve.table.partition.max", 1000,
    + "Maximum number of table partitions that metastore internally retrieves in one batch."),

    - // Default parameters for creating tables
    - NEWTABLEDEFAULTPARA("hive.table.parameters.default", ""),
    - // Parameters to copy over when creating a table with Create Table Like.
    - DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", ""),
    - METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl",
    - "org.apache.hadoop.hive.metastore.ObjectStore"),
    - METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName",
    - "org.apache.derby.jdbc.EmbeddedDriver"),
    + METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
    + "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
    + "An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
    + METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
    + "List of comma separated listeners for metastore events."),
    + METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", ""),
    + METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
    + "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
    + "for operations like drop-partition (disallow the drop-partition if the user in\n" +
    + "question doesn't have permissions to delete the corresponding directory\n" +
    + "on the storage).\n"),
    + METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", 0L,
    + "Frequency at which timer task runs to purge expired events in metastore(in seconds)."),
    + METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", 0L,
    + "Duration after which events expire from events table (in seconds)"),
    + METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", false,
    + "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
    + "the client's reported user and group permissions. Note that this property must be set on " +
    + "both the client and server sides. Further note that its best effort. \n" +
    + "If client sets its to true and server sets it to false, client setting will be ignored."),
    + METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
    + "Partition names will be checked against this regex pattern and rejected if not matched."),
    +
    + METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
    + "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
    + "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
    + "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
    + "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
    + "is also irrelevant."),
    + METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true, ""),
    + METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true, ""),
    + METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
    + "hive.metastore.disallow.incompatible.col.type.changes", false,
    + "If true (default is false), ALTER TABLE operations which change the type of \n" +
    + "a column (say STRING) to an incompatible type (say MAP&lt;STRING, STRING&gt;) are disallowed. \n" +
    + "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
    + "datatypes can be converted from string to any type. The map is also serialized as\n" +
    + "a string, which can be read as a string as well. However, with any binary \n" +
    + "serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
    + "when subsequently trying to access old partitions. \n" +
    + "\n" +
    + "Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are \n" +
    + "not blocked. \n" +
    + "\n" +
    + "See HIVE-4409 for more details."),
    +
    + NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
    + "Default property values for newly created tables"),
    + DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
    + "Table Properties to copy over when executing a Create Table Like."),
    + METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
    + "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" +
    + "This class is used to store and retrieval of raw metadata objects such as table, database"),
    + METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
    + "Driver class name for a JDBC metastore"),
          METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
    - "org.datanucleus.api.jdo.JDOPersistenceManagerFactory"),
    + "org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
    + "class implementing the jdo persistence"),
          METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
    - "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore"),
    - METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true),
    - METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true),
    - METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP"),
    - METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", ""),
    - METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties",""),
    + "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
    + METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
    + "Detaches all objects from session so that they can be used after transaction is committed"),
    + METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
    + "Reads outside of transactions"),
    + METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
    + "Username to use against metastore database"),
    + METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
    + "List of comma separated listeners for the end of metastore functions."),
    + METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
    + "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
    + "* implies all the keys will get inherited."),

          // Parameters for exporting metadata on table drop (requires the use of the)
          // org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener
    - METADATA_EXPORT_LOCATION("hive.metadata.export.location", ""),
    - MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true),
    + METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
    + "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
    + "it is the location to which the metadata will be exported. The default is an empty string, which results in the \n" +
    + "metadata being exported to the current user's home directory on HDFS."),
    + MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
    + "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
    + "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
    + "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),

          // CLI
    - CLIIGNOREERRORS("hive.cli.errors.ignore", false),
    - CLIPRINTCURRENTDB("hive.cli.print.current.db", false),
    - CLIPROMPT("hive.cli.prompt", "hive"),
    - CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1),
    + CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
    + CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
    + "Whether to include the current database in the Hive prompt."),
    + CLIPROMPT("hive.cli.prompt", "hive",
    + "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
    + "Variable substitution will only be invoked at the Hive CLI startup."),
    + CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
    + "The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
    + "If the value of this property is -1, then Hive will use the auto-detected terminal width."),

    - HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl"),
    + HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),

          // Things we log in the jobconf

          // session identifier
    - HIVESESSIONID("hive.session.id", ""),
    + HIVESESSIONID("hive.session.id", "", ""),
          // whether session is running in silent mode or not
    - HIVESESSIONSILENT("hive.session.silent", false),
    + HIVESESSIONSILENT("hive.session.silent", false, ""),

    - // Whether to enable history for this session
    - HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false),
    + HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
    + "Whether to log Hive query, query plan, runtime statistics etc."),

    - // query being executed (multiple per session)
    - HIVEQUERYSTRING("hive.query.string", ""),
    + HIVEQUERYSTRING("hive.query.string", "",
    + "Query being executed (might be multiple per a session)"),

    - // id of query being executed (multiple per session)
    - HIVEQUERYID("hive.query.id", ""),
    + HIVEQUERYID("hive.query.id", "",
    + "ID for query being executed (might be multiple per a session)"),

    - // id of the mapred plan being executed (multiple per query)
    - HIVEPLANID("hive.query.planid", ""),
    - // max jobname length
    - HIVEJOBNAMELENGTH("hive.jobname.length", 50),
    + HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),

          // hive jar
    - HIVEJAR("hive.jar.path", ""),
    - HIVEAUXJARS("hive.aux.jars.path", ""),
    + HIVEJAR("hive.jar.path", "", ""),
    + HIVEAUXJARS("hive.aux.jars.path", "", ""),

          // hive added files and jars
    - HIVEADDEDFILES("hive.added.files.path", ""),
    - HIVEADDEDJARS("hive.added.jars.path", ""),
    - HIVEADDEDARCHIVES("hive.added.archives.path", ""),
    + HIVEADDEDFILES("hive.added.files.path", "", ""),
    + HIVEADDEDJARS("hive.added.jars.path", "", ""),
    + HIVEADDEDARCHIVES("hive.added.archives.path", "", ""),

    - HIVE_CURRENT_DATABASE("hive.current.database", ""), // internal usage only
    + HIVE_CURRENT_DATABASE("hive.current.database", "", "database currently in use; internal usage only", true),

          // for hive script operator
    - HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", 0),
    - HIVETABLENAME("hive.table.name", ""),
    - HIVEPARTITIONNAME("hive.partition.name", ""),
    - HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false),
    - HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID"),
    - HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false),
    - HIVEMAPREDMODE("hive.mapred.mode", "nonstrict"),
    - HIVEALIAS("hive.alias", ""),
    - HIVEMAPSIDEAGGREGATE("hive.map.aggr", true),
    - HIVEGROUPBYSKEW("hive.groupby.skewindata", false),
    - HIVE_OPTIMIZE_MULTI_GROUPBY_COMMON_DISTINCTS("hive.optimize.multigroupby.common.distincts",
    - true),
    - HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000),
    - HIVEJOINCACHESIZE("hive.join.cache.size", 25000),
    + HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", 0,
    + "How long to run autoprogressor for the script/UDTF operators (in seconds).\n" +
    + "Set to 0 for forever."),
    + HIVETABLENAME("hive.table.name", "", ""),
    + HIVEPARTITIONNAME("hive.partition.name", "", ""),
    + HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
    + "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
    + "to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" +
    + "outputting to stderr. This option removes the need of periodically producing stderr messages, \n" +
    + "but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."),
    + HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
    + "Name of the environment variable that holds the unique script operator ID in the user's \n" +
    + "transform function (the custom mapper/reducer that the user has specified in the query)"),
    + HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
    + "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
    + HIVEMAPREDMODE("hive.mapred.mode", "nonstrict",
    + "The mode in which the Hive operations are being performed. \n" +
    + "In strict mode, some risky queries are not allowed to run. They include:\n" +
    + " Cartesian Product.\n" +
    + " No partition being picked up for a query.\n" +
    + " Comparing bigints and strings.\n" +
    + " Comparing bigints and doubles.\n" +
    + " Orderby without limit."),
    + HIVEALIAS("hive.alias", "", ""),
    + HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
    + HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
    + HIVE_OPTIMIZE_MULTI_GROUPBY_COMMON_DISTINCTS("hive.optimize.multigroupby.common.distincts", true,
    + "Whether to optimize a multi-groupby query with the same distinct.\n" +
    + "Consider a query like:\n" +
    + "\n" +
    + " from src\n" +
    + " insert overwrite table dest1 select col1, count(distinct colx) group by col1\n" +
    + " insert overwrite table dest2 select col2, count(distinct colx) group by col2;\n" +
    + "\n" +
    + "With this parameter set to true, first we spray by the distinct value (colx), and then\n" +
    + "perform the 2 groups bys. This makes sense if map-side aggregation is turned off. However,\n" +
    + "with maps-side aggregation, it might be useful in some cases to treat the 2 inserts independently, \n" +
    + "thereby performing the query above in 2MR jobs instead of 3 (due to spraying by distinct key first).\n" +
    + "If this parameter is turned off, we don't consider the fact that the distinct key is the same across\n" +
    + "different MR jobs."),
    + HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
    + "How many rows in the right-most join operand Hive should buffer before emitting the join result."),
    + HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
    + "How many rows in the joining tables (except the streaming table) should be cached in memory."),

          // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row,
          // need to remove by hive .13. Also, do not change default (see SMB operator)
    - HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100),
    + HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""),

    - HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000),
    - HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000),
    - HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5),
    - HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3),
    - HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9),
    - HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5),
    - HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true),
    - HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false),
    - HIVE_MAP_GROUPBY_SORT_TESTMODE("hive.map.groupby.sorted.testmode", false),
    - HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false),
    - HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30),
    + HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000,
    + "How many rows with the same key value should be cached in memory per smb joined table."),
    + HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000,
    + "Number of rows after which size of the grouping keys/aggregation classes is performed"),
    + HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5,
    + "Portion of total memory to be used by map-side group aggregation hash table"),
    + HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3,
    + "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"),
    + HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9,
    + "The max memory to be used by map-side group aggregation hash table, if the memory usage is higher than this number, force to flush data"),
    + HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5,
    + "Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" +
    + "Set to 1 to make sure hash aggregation is never turned off."),
    + HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true,
    + "Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" +
    + "common group by keys, it will be optimized to generate single M/R job."),
    + HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false,
    + "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
    + "the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" +
    + "is that it limits the number of mappers to the number of files."),
    + HIVE_MAP_GROUPBY_SORT_TESTMODE("hive.map.groupby.sorted.testmode", false,
    + "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" +
    + "the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan\n" +
    + "is not converted, but a query property is set to denote the same."),
    + HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false,
    + "Whether to enable using Column Position Alias in Group By or Order By"),
    + HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30,
    + "Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" +
    + "For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" +
    + "4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" +
    + "This can lead to explosion across map-reduce boundary if the cardinality of T is very high,\n" +
    + "and map-side aggregation does not do a very good job. \n" +
    + "\n" +
    + "This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" +
    + "cardinality (4 in the example above), is more than this value, a new MR job is added under the\n" +
    + "assumption that the original group by will reduce the data size."),

          // for hive udtf operator
    - HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false),
    -
    - // Default file format for CREATE TABLE statement
    - // Options: TextFile, SequenceFile
    - HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile",
    - new StringsValidator("TextFile", "SequenceFile", "RCfile", "ORC")),
    - HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile",
    - new StringsValidator("TextFile", "SequenceFile", "RCfile")),
    - HIVECHECKFILEFORMAT("hive.fileformat.check", true),
    + HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false,
    + "Whether Hive should automatically send progress information to TaskTracker \n" +
    + "when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious \n" +
    + "because this may prevent TaskTracker from killing tasks with infinite loops."),
    +
    + HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"),
    + "Default file format for CREATE TABLE statement. \n" +
    + "Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS <TEXTFILE|SEQUENCEFILE> to override"),
    + HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile"), ""),
    + HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"),
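
    The StringSet validator used in the entries above replaces the earlier
    StringsValidator. As a rough illustration of the allow-list idea only (the
    patch's real validator implementation may differ), a case-insensitive check
    could look like this:

        import java.util.LinkedHashSet;
        import java.util.Set;

        public class StringSetSketch {
          private final Set<String> expected = new LinkedHashSet<String>();

          public StringSetSketch(String... values) {
            for (String value : values) {
              expected.add(value.toLowerCase());
            }
          }

          // Returns null when the value is acceptable, else an error message.
          public String validate(String value) {
            if (value == null || !expected.contains(value.toLowerCase())) {
              return "Invalid value " + value + "; expects one of " + expected;
            }
            return null;
          }

          public static void main(String[] args) {
            StringSetSketch fileFormats =
                new StringSetSketch("TextFile", "SequenceFile", "RCfile", "ORC");
            System.out.println(fileFormats.validate("orc"));     // null (valid)
            System.out.println(fileFormats.validate("Parquet")); // error message
          }
        }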

          // default serde for rcfile
          HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde",
    - "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe"),
    + "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe",
    + "The default SerDe Hive will use for the RCFile format"),

    - //Location of Hive run time structured log file
    - HIVEHISTORYFILELOC("hive.querylog.location", System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name")),
    + HIVEHISTORYFILELOC("hive.querylog.location",
    + System.getProperty("java.io.tmpdir") + File.separator + System.getProperty("user.name"),
    + "Location of Hive run time structured log file"),
    +
    + HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true,
    + "Whether to log the plan's progress every time a job's progress is checked.\n" +
    + "These logs are written to the location specified by hive.querylog.location"),
    +
    + HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", 60000L,
    + "The interval to wait between logging the plan's progress in milliseconds.\n" +
    + "If there is a whole number percentage change in the progress of the mappers or the reducers,\n" +
    + "the progress is logged regardless of this value.\n" +
    + "The actual interval will be the ceiling of (this value divided by the value of\n" +
    + "hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval\n" +
    + "I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be\n" +
    + "logged less frequently than specified.\n" +
    + "This only has an effect if hive.querylog.enable.plan.progress is set to true."),

    - // Whether to log the plan's progress every time a job's progress is checked
    - HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true),
    -
    - // The interval between logging the plan's progress in milliseconds
    - HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", 60000L),
    -
    - // Default serde and record reader for user scripts
    - HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"),
    + HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",
    + "The default SerDe for transmitting input data to and reading output data from the user scripts. "),
          HIVESCRIPTRECORDREADER("hive.script.recordreader",
    - "org.apache.hadoop.hive.ql.exec.TextRecordReader"),
    + "org.apache.hadoop.hive.ql.exec.TextRecordReader",
    + "The default record reader for reading data from the user scripts. "),
          HIVESCRIPTRECORDWRITER("hive.script.recordwriter",
    - "org.apache.hadoop.hive.ql.exec.TextRecordWriter"),
    - HIVESCRIPTESCAPE("hive.transform.escape.input", false),
    - HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000 ),
    + "org.apache.hadoop.hive.ql.exec.TextRecordWriter",
    + "The default record writer for writing data to the user scripts. "),
    + HIVESCRIPTESCAPE("hive.transform.escape.input", false,
    + "This adds an option to escape special chars (newlines, carriage returns and\n" +
    + "tabs) when they are passed to the user script. This is useful if the Hive tables\n" +
    + "can contain data that contains special characters."),
    + HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000,
    + "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" +
    + "The last record before the end of stream can have less than hive.binary.record.max.length bytes"),

          // HWI
    - HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0"),
    - HIVEHWILISTENPORT("hive.hwi.listen.port", "9999"),
    - HIVEHWIWARFILE("hive.hwi.war.file", System.getenv("HWI_WAR_FILE")),
    + HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0", "This is the host address the Hive Web Interface will listen on"),
    + HIVEHWILISTENPORT("hive.hwi.listen.port", "9999", "This is the port the Hive Web Interface will listen on"),
    + HIVEHWIWARFILE("hive.hwi.war.file", System.getenv("HWI_WAR_FILE"),
    + "This sets the path to the HWI war file, relative to ${HIVE_HOME}. "),

    - // mapper/reducer memory in local mode
    - HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0),
    + HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "Mapper/reducer memory in local mode"),

          //small table file size
    - HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize",25000000L), //25M
    + HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L,
    + "The threshold for the input file size of the small tables; if the file size is smaller \n" +
    + "than this threshold, it will try to convert the common join into map join"),

    - // random number for split sampling
    - HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0),
    + HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0,
    + "A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."),

          // test mode in hive mode
    - HIVETESTMODE("hive.test.mode", false),
    - HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_"),
    - HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32),
    - HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", ""),
    -
    - HIVEMERGEMAPFILES("hive.merge.mapfiles", true),
    - HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false),
    - HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000)),
    - HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000)),
    - HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true),
    + HIVETESTMODE("hive.test.mode", false,
    + "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename."),
    + HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
    + "In test mode, specfies prefixes for the output table"),
    + HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
    + "In test mode, specfies sampling frequency for table, which is not bucketed,\n" +
    + "For example, the following query:" +
    + " INSERT OVERWRITE TABLE dest" +
    + " SELECT col1 from src" +
    + "would be converted to" +
    + " INSERT OVERWRITE TABLE test_dest" +
    + " SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))"),
    + HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "",
    + "In test mode, specifies comma separated table names which would not apply sampling"),
    +
    + HIVEMERGEMAPFILES("hive.merge.mapfiles", true,
    + "Merge small files at the end of a map-only job"),
    + HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false,
    + "Merge small files at the end of a map-reduce job"),
    + HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000),
    + "Size of merged files at the end of the job"),
    + HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000),
    + "When the average output file size of a job is less than this number, Hive will start an additional \n" +
    + "map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" +
    + "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."),
    + HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""),
          HIVEMERGEINPUTFORMATBLOCKLEVEL("hive.merge.input.format.block.level",
    - "org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat"),
    + "org.apache.hadoop.hive.ql.io.rcfile.merge.RCFileBlockMergeInputFormat", ""),
          HIVEMERGECURRENTJOBHASDYNAMICPARTITIONS(
    - "hive.merge.current.job.has.dynamic.partitions", false),
    -
    - HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true),
    - HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true),
    + "hive.merge.current.job.has.dynamic.partitions", false, ""),

    - // Maximum fraction of heap that can be used by ORC file writers
    - HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f), // 50%
    - // Define the version of the file to write
    - HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null),
    - // Define the default ORC stripe size
    + HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true,
    + "If this is set the header for RCFiles will simply be RCF. If this is not\n" +
    + "set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" +
    + "by the input and output RCFile formats."),
    + HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""),
    +
    + HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f,
    + "Maximum fraction of heap that can be used by ORC file writers"),
    + HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null,
    + "Define the version of the file to write"),
          HIVE_ORC_DEFAULT_STRIPE_SIZE("hive.exec.orc.default.stripe.size",
    - 256L * 1024 * 1024),
    - HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD(
    - "hive.exec.orc.dictionary.key.size.threshold", 0.8f),
    - // Define the default ORC index stride
    - HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE("hive.exec.orc.default.row.index.stride"
    - , 10000),
    - // Define the default ORC buffer size
    - HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024),
    - // Define the default block padding
    - HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding",
    - true),
    - // Define the default compression codec for ORC file
    - HIVE_ORC_DEFAULT_COMPRESS("hive.exec.orc.default.compress", "ZLIB"),
    -
    - HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false),
    - HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000),
    - HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10),
    -
    - HIVESKEWJOIN("hive.optimize.skewjoin", false),
    - HIVECONVERTJOIN("hive.auto.convert.join", true),
    - HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true),
    + 256L * 1024 * 1024,
    + "Define the default ORC stripe size"),
    +
    + HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD("hive.exec.orc.dictionary.key.size.threshold", 0.8f,
    + "If the number of keys in a dictionary is greater than this fraction of the total number of\n" +
    + "non-null rows, turn off dictionary encoding. Use 1 to always use dictionary encoding."),
    + HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE("hive.exec.orc.default.row.index.stride", 10000, "Define the default ORC index stride"),
    + HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024, "Define the default ORC buffer size"),
    + HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true, "Define the default block padding"),
    + HIVE_ORC_DEFAULT_COMPRESS("hive.exec.orc.default.compress", "ZLIB", "Define the default compression codec for ORC file"),
    +
    + HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false,
    + "If turned on splits generated by orc will include metadata about the stripes in the file. This\n" +
    + "data is read remotely (from the client or HS2 machine) and sent to all the tasks."),
    + HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000,
    + "Cache size for keeping meta info about orc splits cached in the client."),
    + HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10,
    + "How many threads orc should use to create splits in parallel."),
    +
    + HIVESKEWJOIN("hive.optimize.skewjoin", false,
    + "Whether to enable skew join optimization. \n" +
    + "The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" +
    + "processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" +
    + "job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" +
    + "the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" +
    + "map-join."),
    + HIVECONVERTJOIN("hive.auto.convert.join", true,
    + "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"),
    + HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true,
    + "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" +
    + "If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" +
    + "specified size, the join is directly converted to a mapjoin (there is no conditional task)."),
          HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size",
    - 10000000L),
    - HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", true),
    - HIVESKEWJOINKEY("hive.skewjoin.key", 100000),
    - HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000),
    - HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L), //32M
    -
    - HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000),
    - HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L),
    - HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10),
    - HIVELIMITOPTENABLE("hive.limit.optimize.enable", false),
    - HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000),
    - HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f),
    -
    - HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000),
    - HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75),
    - HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55),
    - HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90),
    - HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000),
    -
    - HIVEDEBUGLOCALTASK("hive.debug.localtask",false),
    -
    - HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat"),
    -
    - HIVEENFORCEBUCKETING("hive.enforce.bucketing", false),
    - HIVEENFORCESORTING("hive.enforce.sorting", false),
    - HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true),
    - HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner"),
    - HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false),
    - HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false),
    + 10000000L,
    + "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" +
    + "However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" +
    + "the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"),
    + HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", true,
    + "For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" +
    + "filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" +
    + "Currently, this is not working with vectorization or tez execution engine."),
    + HIVESKEWJOINKEY("hive.skewjoin.key", 100000,
    + "Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" +
    + "we think the key as a skew join key. "),
    + HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000,
    + "Determine the number of map task used in the follow up map join job for a skew join.\n" +
    + "It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."),
    + HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L,
    + "Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" +
    + "the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."),
    +
    + HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000,
    + "Send a heartbeat after this interval - used by mapjoin and filter operators"),
    + HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L,
    + "When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."),
    + HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10,
    + "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."),
    + HIVELIMITOPTENABLE("hive.limit.optimize.enable", false,
    + "Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."),
    + HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000,
    + "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. \n" +
    + "Insert queries are not restricted by this limit."),
    + HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", -1f,
    + "The max memory to be used for hash in RS operator for top K selection."),
    +
    + HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, ""),
    + HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""),
    + HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55,
    + "How much memory the local task can take to hold the key/value pairs in an in-memory hash table \n" +
    + "when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" +
    + "the local task will abort by itself, meaning the data of the small table is too large to be held in memory."),
    + HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90,
    + "How much memory the local task can take to hold the key/value pairs in an in-memory hash table. \n" +
    + "If the local task's memory usage is more than this number, the local task will abort by itself, \n" +
    + "meaning the data of the small table is too large to be held in memory."),
    + HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000,
    + "The number means after how many rows processed it needs to check the memory usage"),
    +
    + HIVEDEBUGLOCALTASK("hive.debug.localtask",false, ""),
    +
    + HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat",
    + "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."),
    +
    + HIVEENFORCEBUCKETING("hive.enforce.bucketing", false,
    + "Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced."),
    + HIVEENFORCESORTING("hive.enforce.sorting", false,
    + "Whether sorting is enforced. If true, while inserting into the table, sorting is enforced."),
    + HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true,
    + "If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing \n" +
    + "bucketing/sorting for queries of the form: \n" +
    + "insert overwrite table T2 select * from T1;\n" +
    + "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."),
    + HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""),
    + HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false,
    + "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"),
    + HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false,
    + "If the user asked for bucketed map-side join, and it cannot be performed, \n" +
    + "should the query fail or not ? For example, if the buckets in the tables being joined are\n" +
    + "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" +
    + "query will fail if hive.enforce.bucketmapjoin is set to true."),

    - HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false),
    + HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false,
    + "Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."),
          HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR(
              "hive.auto.convert.sortmerge.join.bigtable.selection.policy",
    - "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ"),
    + "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ",
    + "The policy to choose the big table for automatic conversion to sort-merge join. \n" +
    + "By default, the table with the largest partitions is assigned the big table. All policies are:\n" +
    + ". based on position of the table - the leftmost table is selected\n" +
    + "org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" +
    + ". based on total size (all the partitions selected in the query) of the table \n" +
    + "org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n" +
    + ". based on average size (all the partitions selected in the query) of the table \n" +
    + "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\n" +
    + "New policies can be added in future."),
          HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN(
    - "hive.auto.convert.sortmerge.join.to.mapjoin", false),
    -
    - HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false),
    - HIVEROWOFFSET("hive.exec.rowoffset", false),
    + "hive.auto.convert.sortmerge.join.to.mapjoin", false,
    + "If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \n" +
    + "this parameter decides whether each table should be tried as a big table, and effectively a map-join should be\n" +
    + "tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the\n" +
    + "big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\n" +
    + "sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted\n" +
    + "and bucketed table with few files (say 10 files) are being joined with a very small sorter and bucketed table\n" +
    + "with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" +
    + "if the complete small table can fit in memory, and a map-join can be performed."),
    +
    + HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""),
    + HIVEROWOFFSET("hive.exec.rowoffset", false,
    + "Whether to provide the row offset virtual column"),

    - HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE("hive.hadoop.supports.splittable.combineinputformat", false),
    + HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE("hive.hadoop.supports.splittable.combineinputformat", false, ""),

          // Optimizer
    - HIVEOPTINDEXFILTER("hive.optimize.index.filter", false), // automatically use indexes
    - HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false), //automatically update stale indexes
    - HIVEOPTPPD("hive.optimize.ppd", true), // predicate pushdown
    - HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true), // predicate pushdown
    - HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true),
    - HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", true),
    - // push predicates down to storage handlers
    - HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true),
    - HIVEOPTGROUPBY("hive.optimize.groupby", true), // optimize group by
    - HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false), // optimize bucket map join
    - HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false), // try to use sorted merge bucket map join
    - HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true),
    - HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4),
    -
    - HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false),
    - HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000),
    - HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f),
    + HIVEOPTINDEXFILTER("hive.optimize.index.filter", false,
    + "Whether to enable automatic use of indexes"),
    + HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false,
    + "Whether to update stale indexes automatically"),
    + HIVEOPTPPD("hive.optimize.ppd", true,
    + "Whether to enable predicate pushdown"),
    + HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true,
    + "Whether to transitively replicate predicate filters over equijoin conditions."),
    + HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true,
    + "Whether to push predicates down into storage handlers. Ignored when hive.optimize.ppd is false."),
    + HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", true, ""),
    + HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true,
    + "Whether to push predicates down to storage handlers"),
    + HIVEOPTGROUPBY("hive.optimize.groupby", true,
    + "Whether to enable the bucketed group by from bucketed partitions/tables."),
    + HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false,
    + "Whether to try bucket mapjoin"),
    + HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false,
    + "Whether to try sorted bucket merge map join"),
    + HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true,
    + "Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" +
    + "This should always be set to true. Since it is a new feature, it has been made configurable."),
    + HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4,
    + "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" +
    + "That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" +
    + "The optimization will be automatically disabled if number of reducers would be less than specified value."),
    +
    + HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, ""),
    + HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, ""),
    + HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, ""),

          // whether to optimize union followed by select followed by filesink
          // It creates sub-directories in the final output, so should not be turned on in systems
          // where MAPREDUCE-1501 is not present
    - HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false),
    - HIVEOPTCORRELATION("hive.optimize.correlation", false), // exploit intra-query correlations
    -
    - // whether hadoop map-reduce supports sub-directories. It was added by MAPREDUCE-1501.
    - // Some optimizations can only be performed if the version of hadoop being used supports
    - // sub-directories
    - HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES("hive.mapred.supports.subdirectories", false),
    -
    - // optimize skewed join by changing the query plan at compile time
    - HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false),
    + HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false,
    + "Whether to remove the union and push the operators between union and the filesink above union. \n" +
    + "This avoids an extra scan of the output by union. This is independently useful for union\n" +
    + "queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an\n" +
    + "extra union is inserted.\n" +
    + "\n" +
    + "The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\n" +
    + "If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the\n" +
    + "number of reducers are few, so the number of files anyway are small. However, with this optimization,\n" +
    + "we are increasing the number of files possibly by a big margin. So, we merge aggressively."),
    + HIVEOPTCORRELATION("hive.optimize.correlation", false, "Exploit intra-query correlations."),
    +
    + HIVE_HADOOP_SUPPORTS_SUBDIRECTORIES("hive.mapred.supports.subdirectories", false,
    + "Whether the version of Hadoop which is running supports sub-directories for tables/partitions. \n" +
    + "Many Hive optimizations can be applied if the Hadoop version supports sub-directories for\n" +
    + "tables/partitions. It was added by MAPREDUCE-1501"),
    +
    + HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false,
    + "Whether to create a separate plan for skewed keys for the tables in the join.\n" +
    + "This is based on the skewed keys stored in the metadata. At compile time, the plan is broken\n" +
    + "into different joins: one for the skewed keys, and the other for the remaining keys. And then,\n" +
    + "a union is performed for the 2 joins generated above. So unless the same skewed key is present\n" +
    + "in both the joined tables, the join for the skewed key will be performed as a map-side join.\n" +
    + "\n" +
    + "The main difference between this parameter and hive.optimize.skewjoin is that this parameter\n" +
    + "uses the skew information stored in the metastore to optimize the plan at compile time itself.\n" +
    + "If there is no skew information in the metadata, this parameter will not have any affect.\n" +
    + "Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\n" +
    + "Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing\n" +
    + "so for backward compatibility.\n" +
    + "\n" +
    + "If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
    + "would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),

          // Indexes
    - HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024), // 5G
    - HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1), // infinity
    - HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000), // 10M
    - HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024), // 10G
    - HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true),
    + HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024,
    + "Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G
    + HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1,
    + "Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity."), // infinity
    + HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000,
    + "The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity."), // 10M
    + HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024,
    + "The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity."), // 10G
    + HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true,
    + "Whether or not to use a binary search to find the entries in an index table that match the filter, where possible"),

          // Statistics
    - HIVESTATSAUTOGATHER("hive.stats.autogather", true),
    - HIVESTATSDBCLASS("hive.stats.dbclass", "counter",
    - new PatternValidator("jdbc(:.*)", "hbase", "counter", "custom")), // StatsSetupConst.StatDB
    + HIVESTATSAUTOGATHER("hive.stats.autogather", true,
    + "A flag to gather statistics automatically during the INSERT OVERWRITE command."),
    + HIVESTATSDBCLASS("hive.stats.dbclass", "counter", new PatternSet("jdbc(:.*)", "hbase", "counter", "custom"),
    + "The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported."
    + ), // StatsSetupConst.StatDB
          HIVESTATSJDBCDRIVER("hive.stats.jdbcdriver",
    - "org.apache.derby.jdbc.EmbeddedDriver"), // JDBC driver specific to the dbclass
    + "org.apache.derby.jdbc.EmbeddedDriver",
    + "The JDBC driver for the database that stores temporary Hive statistics."),
          HIVESTATSDBCONNECTIONSTRING("hive.stats.dbconnectionstring",
    - "jdbc:derby:;databaseName=TempStatsStore;create=true"), // automatically create database
    - HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher",
    - ""), // default stats publisher if none of JDBC/HBase is specified
    - HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator",
    - ""), // default stats aggregator if none of JDBC/HBase is specified
    - HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout",
    - 30), // default timeout in sec for JDBC connection & SQL statements
    - HIVE_STATS_ATOMIC("hive.stats.atomic",
    - false), // whether to update metastore stats only if all stats are available
    - HIVE_STATS_RETRIES_MAX("hive.stats.retries.max",
    - 0), // maximum # of retries to insert/select/delete the stats DB
    - HIVE_STATS_RETRIES_WAIT("hive.stats.retries.wait",
    - 3000), // # milliseconds to wait before the next retry
    - HIVE_STATS_COLLECT_RAWDATASIZE("hive.stats.collect.rawdatasize", true),
    + "jdbc:derby:;databaseName=TempStatsStore;create=true",
    + "The default connection string for the database that stores temporary Hive statistics."), // automatically create database
    + HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "",
    + "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."),
    + HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "",
    + "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."),
    + HIVE_STATS_JDBC_TIMEOUT("hive.stats.jdbc.timeout", 30,
    + "Timeout value (number of seconds) used by JDBC connection and statements."),
    + HIVE_STATS_ATOMIC("hive.stats.atomic", false,
    + "whether to update metastore stats only if all stats are available"),
    + HIVE_STATS_RETRIES_MAX("hive.stats.retries.max", 0,
    + "Maximum number of retries when stats publisher/aggregator got an exception updating intermediate database. \n" +
    + "Default is no tries on failures."),
    + HIVE_STATS_RETRIES_WAIT("hive.stats.retries.wait", 3000,
    + "The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated by " +

    [... 1006 lines stripped ...]
    Modified: hive/trunk/conf/hive-default.xml.template
    URL: http://svn.apache.org/viewvc/hive/trunk/conf/hive-default.xml.template?rev=1569164&r1=1569163&r2=1569164&view=diff
    ==============================================================================
    --- hive/trunk/conf/hive-default.xml.template (original)
    +++ hive/trunk/conf/hive-default.xml.template Tue Feb 18 02:18:36 2014
    @@ -1,6 +1,5 @@
    -<?xml version="1.0"?>
    -<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    -<!--
    +<?xml version="1.0" encoding="UTF-8" standalone="no"?>
    +<?xml-stylesheet type="text/xsl" href="configuration.xsl"?><!--
         Licensed to the Apache Software Foundation (ASF) under one or more
         contributor license agreements. See the NOTICE file distributed with
         this work for additional information regarding copyright ownership.
    @@ -15,2333 +14,2461 @@
         WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
         See the License for the specific language governing permissions and
         limitations under the License.
    --->
    -
    -<configuration>
    -
    -<!-- WARNING!!! This file is provided for documentation purposes ONLY! -->
    -<!-- WARNING!!! Any changes you make to this file will be ignored by Hive. -->
    -<!-- WARNING!!! You must make your changes in hive-site.xml instead. -->
    -
    -
    -<!-- Hive Execution Parameters -->
    -<property>
    - <name>mapred.reduce.tasks</name>
    - <value>-1</value>
    - <description>The default number of reduce tasks per job. Typically set
    - to a prime close to the number of available hosts. Ignored when
    - mapred.job.tracker is "local". Hadoop set this to 1 by default, whereas Hive uses -1 as its default value.
    - By setting this property to -1, Hive will automatically figure out what should be the number of reducers.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.reducers.bytes.per.reducer</name>
    - <value>1000000000</value>
    - <description>size per reducer.The default is 1G, i.e if the input size is 10G, it will use 10 reducers.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.reducers.max</name>
    - <value>999</value>
    - <description>max number of reducers will be used. If the one
    - specified in the configuration parameter mapred.reduce.tasks is
    - negative, Hive will use this one as the max number of reducers when
    - automatically determine number of reducers.</description>
    -</property>
    -
    -<property>
    - <name>hive.cli.print.header</name>
    - <value>false</value>
    - <description>Whether to print the names of the columns in query output.</description>
    -</property>
    -
    -<property>
    - <name>hive.cli.print.current.db</name>
    - <value>false</value>
    - <description>Whether to include the current database in the Hive prompt.</description>
    -</property>
    -
    -<property>
    - <name>hive.cli.prompt</name>
    - <value>hive</value>
    - <description>Command line prompt configuration value. Other hiveconf can be used in
    - this configuration value. Variable substitution will only be invoked at the Hive
    - CLI startup.</description>
    -</property>
    -
    -<property>
    - <name>hive.cli.pretty.output.num.cols</name>
    - <value>-1</value>
    - <description>The number of columns to use when formatting output generated
    - by the DESCRIBE PRETTY table_name command. If the value of this property
    - is -1, then Hive will use the auto-detected terminal width.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.scratchdir</name>
    - <value>/tmp/hive-${user.name}</value>
    - <description>Scratch space for Hive jobs</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.local.scratchdir</name>
    - <value>/tmp/${user.name}</value>
    - <description>Local scratch space for Hive jobs</description>
    -</property>
    -
    -<property>
    - <name>hive.test.mode</name>
    - <value>false</value>
    - <description>Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.</description>
    -</property>
    -
    -<property>
    - <name>hive.test.mode.prefix</name>
    - <value>test_</value>
    - <description>if Hive is running in test mode, prefixes the output table by this string</description>
    -</property>
    -
    -<!-- If the input table is not bucketed, the denominator of the tablesample is determined by the parameter below -->
    -<!-- For example, the following query: -->
    -<!-- INSERT OVERWRITE TABLE dest -->
    -<!-- SELECT col1 from src -->
    -<!-- would be converted to -->
    -<!-- INSERT OVERWRITE TABLE test_dest -->
    -<!-- SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1)) -->
    -<property>
    - <name>hive.test.mode.samplefreq</name>
    - <value>32</value>
    - <description>if Hive is running in test mode and table is not bucketed, sampling frequency</description>
    -</property>
    -
    -<property>
    - <name>hive.test.mode.nosamplelist</name>
    - <value></value>
    - <description>if Hive is running in test mode, don't sample the above comma separated list of tables</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.uris</name>
    - <value></value>
    - <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.ConnectionURL</name>
    - <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
    - <description>JDBC connect string for a JDBC metastore</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.ConnectionDriverName</name>
    - <value>org.apache.derby.jdbc.EmbeddedDriver</value>
    - <description>Driver class name for a JDBC metastore</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.PersistenceManagerFactoryClass</name>
    - <value>org.datanucleus.api.jdo.JDOPersistenceManagerFactory</value>
    - <description>class implementing the jdo persistence</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.DetachAllOnCommit</name>
    - <value>true</value>
    - <description>detaches all objects from session so that they can be used after transaction is committed</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.NonTransactionalRead</name>
    - <value>true</value>
    - <description>reads outside of transactions</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.ConnectionUserName</name>
    - <value>APP</value>
    - <description>username to use against metastore database</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.ConnectionPassword</name>
    - <value>mine</value>
    - <description>password to use against metastore database</description>
    -</property>
    -
    -<property>
    - <name>javax.jdo.option.Multithreaded</name>
    - <value>true</value>
    - <description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.connectionPoolingType</name>
    - <value>BoneCP</value>
    - <description>Uses a BoneCP connection pool for JDBC metastore</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.validateTables</name>
    - <value>false</value>
    - <description>validates existing schema against code. turn this on if you want to verify existing schema </description>
    -</property>
    -
    -<property>
    - <name>datanucleus.validateColumns</name>
    - <value>false</value>
    - <description>validates existing schema against code. turn this on if you want to verify existing schema </description>
    -</property>
    -
    -<property>
    - <name>datanucleus.validateConstraints</name>
    - <value>false</value>
    - <description>validates existing schema against code. turn this on if you want to verify existing schema </description>
    -</property>
    -
    -<property>
    - <name>datanucleus.storeManagerType</name>
    - <value>rdbms</value>
    - <description>metadata store type</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.autoCreateSchema</name>
    - <value>true</value>
    - <description>creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.autoStartMechanismMode</name>
    - <value>checked</value>
    - <description>throw exception if metadata tables are incorrect</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.transactionIsolation</name>
    - <value>read-committed</value>
    - <description>Default transaction isolation level for identity generation. </description>
    -</property>
    -
    -<property>
    - <name>datanucleus.cache.level2</name>
    - <value>false</value>
    - <description>Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.cache.level2.type</name>
    - <value>SOFT</value>
    - <description>SOFT=soft reference based cache, WEAK=weak reference based cache.</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.identifierFactory</name>
    - <value>datanucleus1</value>
    - <description>Name of the identifier factory to use when generating table/column names etc. 'datanucleus1' is used for backward compatibility with DataNucleus v1</description>
    -</property>
    -
    -<property>
    - <name>datanucleus.plugin.pluginRegistryBundleCheck</name>
    - <value>LOG</value>
    - <description>Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.warehouse.dir</name>
    - <value>/user/hive/warehouse</value>
    - <description>location of default database for the warehouse</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.execute.setugi</name>
    - <value>false</value>
    - <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.event.listeners</name>
    - <value></value>
    - <description>list of comma separated listeners for metastore events.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.partition.inherit.table.properties</name>
    - <value></value>
    - <description>list of comma separated keys occurring in table properties which will get inherited to newly created partitions. * implies all the keys will get inherited.</description>
    -</property>
    -
    -<property>
    - <name>hive.metadata.export.location</name>
    - <value></value>
    - <description>When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, it is the location to which the metadata will be exported. The default is an empty string, which results in the metadata being exported to the current user's home directory on HDFS.</description>
    -</property>
    -
    -<property>
    - <name>hive.metadata.move.exported.metadata.to.trash</name>
    - <value></value>
    - <description>When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.partition.name.whitelist.pattern</name>
    - <value></value>
    - <description>Partition names will be checked against this regex pattern and rejected if not matched.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.disallow.incompatible.col.type.change</name>
    - <value></value>
    - <description>If true (default is false), ALTER TABLE operations which change the type of
    - a column (say STRING) to an incompatible type (say MAP&lt;STRING, STRING&gt;) are disallowed.
    - RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the
    - datatypes can be converted from string to any type. The map is also serialized as
    - a string, which can be read as a string as well. However, with any binary
    - serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions
    - when subsequently trying to access old partitions.
    +--><configuration>
    + <!-- WARNING!!! This file is auto generated for documentation purposes ONLY! -->
    + <!-- WARNING!!! Any changes you make to this file will be ignored by Hive. -->
    + <!-- WARNING!!! You must make your changes in hive-site.xml instead. -->
    + <!-- Hive Execution Parameters -->
    + <property>
    + <key>hive.exec.script.wrapper</key>
    + <value/>
    + <description/>
    + </property>
    + <property>
    + <key>hive.exec.plan</key>
    + <value/>
    + <description/>
    + </property>
    + <property>
    + <key>hive.plan.serialization.format</key>
    + <value>kryo</value>
    + <description>
    + Query plan format serialization between client and task nodes.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.scratchdir</key>
    + <value>/tmp/hive-noland</value>
    + <description>Scratch space for Hive jobs</description>
    + </property>
    + <property>
    + <key>hive.exec.local.scratchdir</key>
    + <value>/var/folders/6l/2kf3r2pj1t176h2nhdwfpk1r0000gp/T//noland</value>
    + <description>Local scratch space for Hive jobs</description>
    + </property>
    + <property>
    + <key>hive.scratch.dir.permission</key>
    + <value>700</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.exec.submitviachild</key>
    + <value>false</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.exec.script.maxerrsize</key>
    + <value>100000</value>
    + <description>
    + Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task).
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.script.allow.partial.consumption</key>
    + <value>false</value>
    + <description>When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input.</description>
    + </property>
    + <property>
    + <key>stream.stderr.reporter.prefix</key>
    + <value>reporter:</value>
    + <description>Streaming jobs that log to standard error with this prefix can log counter or status information.</description>
    + </property>
    + <property>
    + <key>stream.stderr.reporter.enabled</key>
    + <value>true</value>
    + <description>Enable consumption of status and counter messages for streaming jobs.</description>
    + </property>
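
    The two stream.stderr.reporter.* properties above use the standard Hadoop
    streaming convention: lines on standard error that start with the configured
    prefix update counters or task status. A user script (shown in Java here, but
    any language works) might emit:

        public class StreamingReporterDemo {
          public static void main(String[] args) {
            System.err.println("reporter:counter:MyGroup,RowsSeen,1"); // bump a counter
            System.err.println("reporter:status:processing input");   // set task status
            System.out.println("normal script output goes to stdout");
          }
        }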
    + <property>
    + <key>hive.exec.compress.output</key>
    + <value>false</value>
    + <description>
    + This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.compress.intermediate</key>
    + <value>false</value>
    + <description>
    + This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed.
    + </description>
    + </property>
    + <property>
    + <key>hive.intermediate.compression.codec</key>
    + <value/>
    + <description/>
    + </property>
    + <property>
    + <key>hive.intermediate.compression.type</key>
    + <value/>
    + <description/>
    + </property>
    + <property>
    + <key>hive.exec.reducers.bytes.per.reducer</key>
    + <value>1000000000</value>
    + <description>Size per reducer. The default is 1G, i.e. if the input size is 10G, it will use 10 reducers.</description>
    + </property>
    + <property>
    + <key>hive.exec.reducers.max</key>
    + <value>999</value>
    + <description>
    + max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is
    + negative, Hive will use this one as the max number of reducers when automatically determining the number of reducers.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.pre.hooks</key>
    + <value/>
    + <description>
    + Comma-separated list of pre-execution hooks to be invoked for each statement.
    + A pre-execution hook is specified as the name of a Java class which implements the
    + org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.post.hooks</key>
    + <value/>
    + <description>
    + Comma-separated list of post-execution hooks to be invoked for each statement.
    + A post-execution hook is specified as the name of a Java class which implements the
    + org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.failure.hooks</key>
    + <value/>
    + <description>
    + Comma-separated list of on-failure hooks to be invoked for each statement.
    + An on-failure hook is specified as the name of Java class which implements the
    + org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.
    + </description>
    + </property>
    + <property>
    + <key>hive.client.stats.publishers</key>
    + <value/>
    + <description>
    + Comma-separated list of statistics publishers to be invoked on counters on each job.
    + A client stats publisher is specified as the name of a Java class which implements the
    + org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.parallel</key>
    + <value>false</value>
    + <description>Whether to execute jobs in parallel</description>
    + </property>
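
    The generated template is documentation only; the effective value of a property
    such as hive.exec.parallel is resolved by HiveConf, with hive-site.xml
    overriding the built-in default. A minimal sketch, assuming the Hive jars are
    on the classpath:

        import org.apache.hadoop.hive.conf.HiveConf;

        public class ReadParallelSetting {
          public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            boolean parallel = conf.getBoolVar(HiveConf.ConfVars.EXECPARALLEL);
            System.out.println("hive.exec.parallel = " + parallel);
          }
        }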
    + <property>
    + <key>hive.exec.parallel.thread.number</key>
    + <value>8</value>
    + <description>How many jobs at most can be executed in parallel</description>
    + </property>
    + <property>
    + <key>hive.mapred.reduce.tasks.speculative.execution</key>
    + <value>true</value>
    + <description>Whether speculative execution for reducers should be turned on. </description>
    + </property>
    + <property>
    + <key>hive.exec.counters.pull.interval</key>
    + <value>1000</value>
    + <description>
    + The interval with which to poll the JobTracker for the counters of the running job.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.dynamic.partition</key>
    + <value>true</value>
    + <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
    + </property>
    + <property>
    + <key>hive.exec.dynamic.partition.mode</key>
    + <value>strict</value>
    + <description>In strict mode, the user must specify at least one static partition in case the user accidentally overwrites all partitions.</description>
    + </property>
    + <property>
    + <key>hive.exec.max.dynamic.partitions</key>
    + <value>1000</value>
    + <description>Maximum number of dynamic partitions allowed to be created in total.</description>
    + </property>
    + <property>
    + <key>hive.exec.max.dynamic.partitions.pernode</key>
    + <value>100</value>
    + <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
    + </property>
    + <property>
    + <key>hive.exec.max.created.files</key>
    + <value>100000</value>
    + <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
    + </property>
    + <property>
    + <key>hive.downloaded.resources.dir</key>
    + <value>/var/folders/6l/2kf3r2pj1t176h2nhdwfpk1r0000gp/T//${hive.session.id}_resources</value>
    + <description>
    + Temporary local directory for added resources in the remote file system.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.default.partition.name</key>
    + <value>__HIVE_DEFAULT_PARTITION__</value>
    + <description>
    + The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped.
    + This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc).
    + The user has to be aware that the dynamic partition value should not contain this value to avoid confusions.
    + </description>
    + </property>
    + <property>
    + <key>hive.lockmgr.zookeeper.default.partition.name</key>
    + <value>__HIVE_DEFAULT_ZOOKEEPER_PARTITION__</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.exec.show.job.failure.debug.info</key>
    + <value>true</value>
    + <description>
    + If a job fails, whether to provide a link in the CLI to the task with the
    + most failures, along with debugging hints if applicable.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.job.debug.capture.stacktraces</key>
    + <value>true</value>
    + <description>
    + Whether or not stack traces parsed from the task logs of a sampled failed task
    + for each failed job should be stored in the SessionState.
    + </description>
    + </property>
    + <property>
    + <key>hive.exec.job.debug.timeout</key>
    + <value>30000</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.exec.tasklog.debug.timeout</key>
    + <value>20000</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.output.file.extension</key>
    + <value/>
    + <description>String used as a file extension for output files. If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise.</description>
    + </property>
    + <property>
    + <key>hive.exec.mode.local.auto</key>
    + <value>false</value>
    + <description>Let Hive determine whether to run in local mode automatically</description>
    + </property>
    + <property>
    + <key>hive.exec.mode.local.auto.inputbytes.max</key>
    + <value>134217728</value>
    + <description>When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode.</description>
    + </property>
    + <property>
    + <key>hive.exec.mode.local.auto.input.files.max</key>
    + <value>4</value>
    + <description>When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode.</description>
    + </property>
    + <property>
    + <key>hive.exec.drop.ignorenonexistent</key>
    + <value>true</value>
    + <description>Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view</description>
    + </property>
    + <property>
    + <key>hive.ignore.mapjoin.hint</key>
    + <value>true</value>
    + <description>Ignore the mapjoin hint</description>
    + </property>
    + <property>
    + <key>hive.file.max.footer</key>
    + <value>100</value>
    + <description>Maximum number of footer lines a user can define for a table file</description>
    + </property>
    + <property>
    + <key>hadoop.bin.path</key>
    + <value>/usr/bin/hadoop</value>
    + <description/>
    + </property>
    + <property>
    + <key>fs.har.impl</key>
    + <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
    + <description>The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20</description>
    + </property>
    + <property>
    + <key>hive.metastore.metadb.dir</key>
    + <value/>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.warehouse.dir</key>
    + <value>/user/hive/warehouse</value>
    + <description>location of default database for the warehouse</description>
    + </property>
    + <property>
    + <key>hive.metastore.uris</key>
    + <value/>
    + <description>Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore.</description>
    + </property>
    + <property>
    + <key>hive.metastore.connect.retries</key>
    + <value>3</value>
    + <description>Number of retries while opening a connection to metastore</description>
    + </property>
    + <property>
    + <key>hive.metastore.failure.retries</key>
    + <value>1</value>
    + <description>Number of retries upon failure of Thrift metastore calls</description>
    + </property>
    + <property>
    + <key>hive.metastore.client.connect.retry.delay</key>
    + <value>1</value>
    + <description>Number of seconds for the client to wait between consecutive connection attempts</description>
    + </property>
    + <property>
    + <key>hive.metastore.client.socket.timeout</key>
    + <value>20</value>
    + <description>MetaStore Client socket timeout in seconds</description>
    + </property>
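
Taken together, the URI and retry/timeout settings above describe how a remote client reaches the metastore; a minimal sketch using HiveMetaStoreClient (the host name is made up):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;

    public class MetastorePing {
      public static void main(String[] args) throws Exception {
        HiveConf conf = new HiveConf();
        conf.set("hive.metastore.uris", "thrift://metastore-host:9083"); // hypothetical host
        conf.setInt("hive.metastore.connect.retries", 3);
        conf.setInt("hive.metastore.client.socket.timeout", 20);
        HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
        System.out.println(client.getAllDatabases());
        client.close();
      }
    }
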
    + <property>
    + <key>javax.jdo.option.ConnectionPassword</key>
    + <value>mine</value>
    + <description>password to use against metastore database</description>
    + </property>
    + <property>
    + <key>hive.metastore.ds.connection.url.hook</key>
    + <value/>
    + <description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used</description>
    + </property>
    + <property>
    + <key>javax.jdo.option.Multithreaded</key>
    + <value>true</value>
    + <description>Set this to true if multiple threads access metastore through JDO concurrently.</description>
    + </property>
    + <property>
    + <key>javax.jdo.option.ConnectionURL</key>
    + <value>jdbc:derby:;databaseName=metastore_db;create=true</value>
    + <description>JDBC connect string for a JDBC metastore</description>
    + </property>
    + <property>
    + <key>hive.metastore.force.reload.conf</key>
    + <value>false</value>
    + <description>
    + Whether to force reloading of the metastore configuration (including
    + the connection URL) before the next metastore query that accesses the
    + datastore. Once reloaded, this value is reset to false. Used for
    + testing only.
    + </description>
    + </property>
    + <property>
    + <key>hive.hmshandler.retry.attempts</key>
    + <value>1</value>
    + <description>The number of times to retry an HMSHandler call if there was a connection error</description>
    + </property>
    + <property>
    + <key>hive.hmshandler.retry.interval</key>
    + <value>1000</value>
    + <description>The number of milliseconds between HMSHandler retry attempts</description>
    + </property>
    + <property>
    + <key>hive.hmshandler.force.reload.conf</key>
    + <value>false</value>
    + <description>
    + Whether to force reloading of the HMSHandler configuration (including
    + the connection URL) before the next metastore query that accesses the
    + datastore. Once reloaded, this value is reset to false. Used for
    + testing only.
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.server.min.threads</key>
    + <value>200</value>
    + <description>Minimum number of worker threads in the Thrift server's pool.</description>
    + </property>
    + <property>
    + <key>hive.metastore.server.max.threads</key>
    + <value>100000</value>
    + <description>Maximum number of worker threads in the Thrift server's pool.</description>
    + </property>
    + <property>
    + <key>hive.metastore.server.tcp.keepalive</key>
    + <value>true</value>
    + <description>Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.</description>
    + </property>
    + <property>
    + <key>hive.metastore.archive.intermediate.original</key>
    + <value>_INTERMEDIATE_ORIGINAL</value>
    + <description>
    + Intermediate dir suffixes used for archiving. Not important what they
    + are, as long as collisions are avoided
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.archive.intermediate.archived</key>
    + <value>_INTERMEDIATE_ARCHIVED</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.archive.intermediate.extracted</key>
    + <value>_INTERMEDIATE_EXTRACTED</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.kerberos.keytab.file</key>
    + <value/>
    + <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
    + </property>
    + <property>
    + <key>hive.metastore.kerberos.principal</key>
    + <value>hive-metastore/_HOST@EXAMPLE.COM</value>
    + <description>The service principal for the metastore Thrift server. The special string _HOST will be replaced automatically with the correct host name.</description>
    + </property>
    + <property>
    + <key>hive.metastore.sasl.enabled</key>
    + <value>false</value>
    + <description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
    + </property>
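
The _HOST substitution mentioned above follows the usual Hadoop convention; a small sketch using Hadoop's SecurityUtil (the host name is made up):

    import org.apache.hadoop.security.SecurityUtil;

    public class PrincipalDemo {
      public static void main(String[] args) throws Exception {
        String principal = SecurityUtil.getServerPrincipal(
            "hive-metastore/_HOST@EXAMPLE.COM", "metastore01.example.com");
        // Prints hive-metastore/metastore01.example.com@EXAMPLE.COM
        System.out.println(principal);
      }
    }
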
    + <property>
    + <key>hive.metastore.thrift.framed.transport.enabled</key>
    + <value>false</value>
    + <description>If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used.</description>
    + </property>
    + <property>
    + <key>hive.cluster.delegation.token.store.class</key>
    + <value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
    + <description>The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
    + </property>
    + <property>
    + <key>hive.cluster.delegation.token.store.zookeeper.connectString</key>
    + <value/>
    + <description>The ZooKeeper token store connect string.</description>
    + </property>
    + <property>
    + <key>hive.cluster.delegation.token.store.zookeeper.znode</key>
    + <value>/hive/cluster/delegation</value>
    + <description>The root path for token store data.</description>
    + </property>
    + <property>
    + <key>hive.cluster.delegation.token.store.zookeeper.acl</key>
    + <value/>
    + <description>ACL for token store entries. List comma separated all server principals for the cluster.</description>
    + </property>
    + <property>
    + <key>hive.metastore.cache.pinobjtypes</key>
    + <value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value>
    + <description>List of comma separated metastore object types that should be pinned in the cache</description>
    + </property>
    + <property>
    + <key>datanucleus.connectionPoolingType</key>
    + <value>BONECP</value>
    + <description>Specify connection pool library for datanucleus</description>
    + </property>
    + <property>
    + <key>datanucleus.validateTables</key>
    + <value>false</value>
    + <description>validates existing schema against code. turn this on if you want to verify existing schema</description>
    + </property>
    + <property>
    + <key>datanucleus.validateColumns</key>
    + <value>false</value>
    + <description>validates existing schema against code. turn this on if you want to verify existing schema</description>
    + </property>
    + <property>
    + <key>datanucleus.validateConstraints</key>
    + <value>false</value>
    + <description>validates existing schema against code. turn this on if you want to verify existing schema</description>
    + </property>
    + <property>
    + <key>datanucleus.storeManagerType</key>
    + <value>rdbms</value>
    + <description>metadata store type</description>
    + </property>
    + <property>
    + <key>datanucleus.autoCreateSchema</key>
    + <value>true</value>
    + <description>Creates necessary schema on startup if one doesn't exist. Set this to false after creating it once.</description>
    + </property>
    + <property>
    + <key>datanucleus.fixedDatastore</key>
    + <value>false</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.schema.verification</key>
    + <value>false</value>
    + <description>
    + Enforce metastore schema version consistency.
    + True: Verify that version information stored in metastore matches with one from Hive jars. Also disable automatic
    + schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures
    + proper metastore schema migration. (Default)
    + False: Warn if the version information stored in metastore doesn't match with one from Hive jars.
    + </description>
    + </property>
    + <property>
    + <key>datanucleus.autoStartMechanismMode</key>
    + <value>checked</value>
    + <description>throw exception if metadata tables are incorrect</description>
    + </property>
    + <property>
    + <key>datanucleus.transactionIsolation</key>
    + <value>read-committed</value>
    + <description>Default transaction isolation level for identity generation.</description>
    + </property>
    + <property>
    + <key>datanucleus.cache.level2</key>
    + <value>false</value>
    + <description>Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server</description>
    + </property>
    + <property>
    + <key>datanucleus.cache.level2.type</key>
    + <value>none</value>
    + <description/>
    + </property>
    + <property>
    + <key>datanucleus.identifierFactory</key>
    + <value>datanucleus1</value>
    + <description>
    + Name of the identifier factory to use when generating table/column names etc.
    + </description>
    + </property>
    + <property>
    + <key>datanucleus.rdbms.useLegacyNativeValueStrategy</key>
    + <value>true</value>
    + <description/>
    + </property>
    + <property>
    + <key>datanucleus.plugin.pluginRegistryBundleCheck</key>
    + <value>LOG</value>
    + <description>Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]</description>
    + </property>
    + <property>
    + <key>hive.metastore.batch.retrieve.max</key>
    + <value>300</value>
    + <description>
    + Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch.
    + The higher the number, the less the number of round trips is needed to the Hive metastore server,
    + but it may also cause higher memory requirement at the client side.
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.batch.retrieve.table.partition.max</key>
    + <value>1000</value>
    + <description>Maximum number of table partitions that metastore internally retrieves in one batch.</description>
    + </property>
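
The same batching idea can be applied on the client side when listing a heavily partitioned table; a sketch (the helper method and batch size are illustrative, not a Hive API):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class BatchedPartitions {
      // Fetch partitions in chunks, mirroring what
      // hive.metastore.batch.retrieve.max does internally.
      static void fetch(HiveMetaStoreClient client, String db, String table,
                        int batchSize) throws Exception {
        List<String> names = client.listPartitionNames(db, table, (short) -1);
        for (int i = 0; i < names.size(); i += batchSize) {
          List<Partition> batch = client.getPartitionsByNames(
              db, table, names.subList(i, Math.min(i + batchSize, names.size())));
          System.out.println("fetched " + batch.size() + " partitions");
        }
      }
    }
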
    + <property>
    + <key>hive.metastore.init.hooks</key>
    + <value/>
    + <description>
    + A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization.
    + An init hook is specified as the name of a Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener.
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.pre.event.listeners</key>
    + <value/>
    + <description>List of comma separated listeners for metastore events.</description>
    + </property>
    + <property>
    + <key>hive.metastore.event.listeners</key>
    + <value/>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.authorization.storage.checks</key>
    + <value>false</value>
    + <description>
    + Should the metastore do authorization checks against the underlying storage (usually hdfs)
    + for operations like drop-partition (disallow the drop-partition if the user in
    + question doesn't have permissions to delete the corresponding directory
    + on the storage).
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.event.clean.freq</key>
    + <value>0</value>
    + <description>Frequency at which the timer task runs to purge expired events in the metastore (in seconds).</description>
    + </property>
    + <property>
    + <key>hive.metastore.event.expiry.duration</key>
    + <value>0</value>
    + <description>Duration after which events expire from events table (in seconds)</description>
    + </property>
    + <property>
    + <key>hive.metastore.execute.setugi</key>
    + <value>false</value>
    + <description>
    + In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using
    + the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it's best effort.
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.partition.name.whitelist.pattern</key>
    + <value/>
    + <description>Partition names will be checked against this regex pattern and rejected if not matched.</description>
    + </property>
    + <property>
    + <key>hive.metastore.integral.jdo.pushdown</key>
    + <value>false</value>
    + <description>
    + Allow JDO query pushdown for integral partition columns in metastore. Off by default. This
    + improves metastore perf for integral columns, especially if there's a large number of partitions.
    + However, it doesn't work correctly with integral values that are not normalized (e.g. have
    + leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization
    + is also irrelevant.
    + </description>
    + </property>
    + <property>
    + <key>hive.metastore.try.direct.sql</key>
    + <value>true</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.try.direct.sql.ddl</key>
    + <value>true</value>
    + <description/>
    + </property>
    + <property>
    + <key>hive.metastore.disallow.incompatible.col.type.changes</key>
    + <value>false</value>
    + <description>
    + If true (default is false), ALTER TABLE operations which change the type of
    + a column (say STRING) to an incompatible type (say MAP&lt;STRING, STRING&gt;) are disallowed.
    + RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the
    + datatypes can be converted from string to any type. The map is also serialized as
    + a string, which can be read as a string as well. However, with any binary
    + serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions
    + when subsequently trying to access old partitions.

    + Primitive types like INT, STRING, BIGINT, etc are compatible with each other and are
    + not blocked.
    +
    + See HIVE-4409 for more details.
    + </description>
    + </property>
    -
    -<property>
    - <name>hive.metastore.end.function.listeners</name>
    - <value></value>
    - <description>list of comma separated listeners for the end of metastore functions.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.event.expiry.duration</name>
    - <value>0</value>
    - <description>Duration after which events expire from events table (in seconds)</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.event.clean.freq</name>
    - <value>0</value>
    - <description>Frequency at which timer task runs to purge expired events in metastore(in seconds).</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.connect.retries</name>
    - <value>5</value>
    - <description>Number of retries while opening a connection to metastore</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.failure.retries</name>
    - <value>3</value>
    - <description>Number of retries upon failure of Thrift metastore calls</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.client.connect.retry.delay</name>
    - <value>1</value>
    - <description>Number of seconds for the client to wait between consecutive connection attempts</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.client.socket.timeout</name>
    - <value>20</value>
    - <description>MetaStore Client socket timeout in seconds</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.rawstore.impl</name>
    - <value>org.apache.hadoop.hive.metastore.ObjectStore</value>
    - <description>Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. This class is used for storage and retrieval of raw metadata objects such as tables and databases</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.batch.retrieve.max</name>
    - <value>300</value>
    - <description>Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. The higher the number, the less the number of round trips is needed to the Hive metastore server, but it may also cause higher memory requirement at the client side.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.batch.retrieve.table.partition.max</name>
    - <value>1000</value>
    - <description>Maximum number of table partitions that metastore internally retrieves in one batch.</description>
    -</property>
    -
    -<property>
    - <name>hive.default.fileformat</name>
    - <value>TextFile</value>
    - <description>Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS &lt;TEXTFILE|SEQUENCEFILE&gt; to override</description>
    -</property>
    -
    -<property>
    - <name>hive.default.rcfile.serde</name>
    - <value>org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe</value>
    - <description>The default SerDe Hive will use for the RCFile format</description>
    -</property>
    -
    -<property>
    - <name>hive.fileformat.check</name>
    - <value>true</value>
    - <description>Whether to check file format or not when loading data files</description>
    -</property>
    -
    -<property>
    - <name>hive.file.max.footer</name>
    - <value>100</value>
    - <description>Maximum number of footer lines a user can define for a table file</description>
    -</property>
    -
    -<property>
    - <name>hive.map.aggr</name>
    - <value>true</value>
    - <description>Whether to use map-side aggregation in Hive Group By queries</description>
    -</property>
    -
    -<property>
    - <name>hive.groupby.skewindata</name>
    - <value>false</value>
    - <description>Whether there is skew in data to optimize group by queries</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.multigroupby.common.distincts</name>
    - <value>true</value>
    - <description>Whether to optimize a multi-groupby query with the same distinct.
    - Consider a query like:
    -
    - from src
    - insert overwrite table dest1 select col1, count(distinct colx) group by col1
    - insert overwrite table dest2 select col2, count(distinct colx) group by col2;
    -
    - With this parameter set to true, first we spray by the distinct value (colx), and then
    - perform the 2 group bys. This makes sense if map-side aggregation is turned off. However,
    - with map-side aggregation, it might be useful in some cases to treat the 2 inserts independently,
    - thereby performing the query above in 2 MR jobs instead of 3 (due to spraying by distinct key first).
    - If this parameter is turned off, we don't consider the fact that the distinct key is the same across
    - different MR jobs.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.groupby.mapaggr.checkinterval</name>
    - <value>100000</value>
    - <description>Number of rows after which the size of the grouping keys/aggregation classes is checked</description>
    -</property>
    -
    -<property>
    - <name>hive.mapred.local.mem</name>
    - <value>0</value>
    - <description>For local mode, memory of the mappers/reducers</description>
    -</property>
    -
    -<property>
    - <name>hive.mapjoin.followby.map.aggr.hash.percentmemory</name>
    - <value>0.3</value>
    - <description>Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join</description>
    -</property>
    -
    -<property>
    - <name>hive.map.aggr.hash.force.flush.memory.threshold</name>
    - <value>0.9</value>
    - <description>The max memory to be used by map-side group aggregation hash table, if the memory usage is higher than this number, force to flush data</description>
    -</property>
    -
    -<property>
    - <name>hive.map.aggr.hash.percentmemory</name>
    - <value>0.5</value>
    - <description>Portion of total memory to be used by map-side group aggregation hash table</description>
    -</property>
    -
    -<property>
    - <name>hive.session.history.enabled</name>
    - <value>false</value>
    - <description>Whether to log Hive query, query plan, runtime statistics etc.</description>
    -</property>
    -
    -<property>
    - <name>hive.map.aggr.hash.min.reduction</name>
    - <value>0.5</value>
    - <description>Hash aggregation will be turned off if the ratio between hash
    - table size and input rows is bigger than this number. Set to 1 to make sure
    - hash aggregation is never turned off.</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.index.filter</name>
    - <value>false</value>
    - <description>Whether to enable automatic use of indexes</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.index.groupby</name>
    - <value>false</value>
    - <description>Whether to enable optimization of group-by queries using Aggregate indexes.</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.ppd</name>
    - <value>true</value>
    - <description>Whether to enable predicate pushdown</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.ppd.storage</name>
    - <value>true</value>
    - <description>Whether to push predicates down into storage handlers. Ignored when hive.optimize.ppd is false.</description>
    -</property>
    -
    -<property>
    - <name>hive.ppd.recognizetransivity</name>
    - <value>true</value>
    - <description>Whether to transitively replicate predicate filters over equijoin conditions.</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.groupby</name>
    - <value>true</value>
    - <description>Whether to enable the bucketed group by from bucketed partitions/tables.</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.skewjoin.compiletime</name>
    - <value>false</value>
    - <description>Whether to create a separate plan for skewed keys for the tables in the join.
    - This is based on the skewed keys stored in the metadata. At compile time, the plan is broken
    - into different joins: one for the skewed keys, and the other for the remaining keys. And then,
    - a union is performed for the 2 joins generated above. So unless the same skewed key is present
    - in both the joined tables, the join for the skewed key will be performed as a map-side join.
    -
    - The main difference between this parameter and hive.optimize.skewjoin is that this parameter
    - uses the skew information stored in the metastore to optimize the plan at compile time itself.
    - If there is no skew information in the metadata, this parameter will not have any effect.
    - Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.
    - Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing
    - so for backward compatibility.
    -
    - If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime
    - would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.union.remove</name>
    - <value>false</value>
    - <description>
    - Whether to remove the union and push the operators between union and the filesink above
    - union. This avoids an extra scan of the output by union. This is independently useful for union
    - queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an
    - extra union is inserted.
    -
    - The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.
    - If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was that the
    - number of reducers is few, so the number of files anyway is small. However, with this optimization,
    - we are increasing the number of files possibly by a big margin. So, we merge aggressively.</description>
    -</property>
    -
    -<property>
    - <name>hive.mapred.supports.subdirectories</name>
    - <value>false</value>
    - <description>Whether the version of Hadoop which is running supports sub-directories for tables/partitions.
    - Many Hive optimizations can be applied if the Hadoop version supports sub-directories for
    - tables/partitions. It was added by MAPREDUCE-1501</description>
    -</property>
    -
    -<property>
    - <name>hive.multigroupby.singlereducer</name>
    - <value>false</value>
    - <description>Whether to optimize multi group by query to generate a single M/R
    - job plan. If the multi group by query has common group by keys, it will be
    - optimized to generate a single M/R job.</description>
    -</property>
    -
    -<property>
    - <name>hive.map.groupby.sorted</name>
    - <value>false</value>
    - <description>If the bucketing/sorting properties of the table exactly match the grouping key, whether to
    - perform the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this
    - is that it limits the number of mappers to the number of files.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.map.groupby.sorted.testmode</name>
    - <value>false</value>
    - <description>If the bucketing/sorting properties of the table exactly match the grouping key, whether to
    - perform the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan
    - is not converted, but a query property is set to denote the same.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.new.job.grouping.set.cardinality</name>
    - <value>30</value>
    - <description>
    - Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.
    - For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;
    - 4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).
    - This can lead to explosion across map-reduce boundary if the cardinality of T is very high,
    - and map-side aggregation does not do a very good job.
    -
    - This parameter decides if Hive should add an additional map-reduce job. If the grouping set
    - cardinality (4 in the example above) is more than this value, a new MR job is added under the
    - assumption that the original group by will reduce the data size.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.join.emit.interval</name>
    - <value>1000</value>
    - <description>How many rows in the right-most join operand Hive should buffer before emitting the join result.</description>
    -</property>
    -
    -<property>
    - <name>hive.join.cache.size</name>
    - <value>25000</value>
    - <description>How many rows in the joining tables (except the streaming table) should be cached in memory. </description>
    -</property>
    -
    -<property>
    - <name>hive.smbjoin.cache.rows</name>
    - <value>10000</value>
    - <description>How many rows with the same key value should be cached in memory per smb joined table. </description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.skewjoin</name>
    - <value>false</value>
    - <description>Whether to enable skew join optimization.
    - The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of
    - processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce
    - job, process those skewed keys. The same key need not be skewed for all the tables, and so,
    - the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a
    - map-join.
    -</description>
    -</property>
    -
    -<property>
    - <name>hive.skewjoin.key</name>
    - <value>100000</value>
    - <description>Determine if we get a skew key in join. If we see more
    - than the specified number of rows with the same key in join operator,
    - we treat the key as a skew join key. </description>
    -</property>
    -
    -<property>
    - <name>hive.skewjoin.mapjoin.map.tasks</name>
    - <value>10000</value>
    - <description> Determine the number of map tasks used in the follow-up map join job
    - for a skew join. It should be used together with hive.skewjoin.mapjoin.min.split
    - to perform a fine grained control.</description>
    -</property>
    -
    -<property>
    - <name>hive.skewjoin.mapjoin.min.split</name>
    - <value>33554432</value>
    - <description> Determine the maximum number of map tasks used in the follow-up map join job
    - for a skew join by specifying the minimum split size. It should be used together with
    - hive.skewjoin.mapjoin.map.tasks to perform a fine grained control.</description>
    -</property>
    -
    -<property>
    - <name>hive.mapred.mode</name>
    - <value>nonstrict</value>
    - <description>The mode in which the Hive operations are being performed.
    - In strict mode, some risky queries are not allowed to run. They include:
    - Cartesian Product.
    - No partition being picked up for a query.
    - Comparing bigints and strings.
    - Comparing bigints and doubles.
    - Orderby without limit.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.enforce.bucketmapjoin</name>
    - <value>false</value>
    - <description>If the user asked for bucketed map-side join, and it cannot be performed,
    - should the query fail or not? For example, if the buckets in the tables being joined are
    - not a multiple of each other, bucketed map-side join cannot be performed, and the
    - query will fail if hive.enforce.bucketmapjoin is set to true.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.script.maxerrsize</name>
    - <value>100000</value>
    - <description>Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). This prevents runaway scripts from filling log partitions to capacity </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.script.allow.partial.consumption</name>
    - <value>false</value>
    - <description> When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.script.operator.id.env.var</name>
    - <value>HIVE_SCRIPT_OPERATOR_ID</value>
    - <description> Name of the environment variable that holds the unique script operator ID in the user's transform function (the custom mapper/reducer that the user has specified in the query)
    - </description>
    -</property>
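
A TRANSFORM script can read that environment variable to tell which script operator launched it; a minimal identity-transform sketch in Java (any executable works the same way):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class TransformScript {
      public static void main(String[] args) throws Exception {
        // Set by Hive under the name configured in hive.script.operator.id.env.var.
        String operatorId = System.getenv("HIVE_SCRIPT_OPERATOR_ID");
        System.err.println("running as script operator " + operatorId);
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        for (String line; (line = in.readLine()) != null; ) {
          System.out.println(line); // pass rows through unchanged
        }
      }
    }
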
    -
    -<property>
    - <name>hive.script.operator.truncate.env</name>
    - <value>false</value>
    - <description>Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.compress.output</name>
    - <value>false</value>
    - <description> This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) are compressed. The compression codec and other options are determined from Hadoop config variables mapred.output.compress* </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.compress.intermediate</name>
    - <value>false</value>
    - <description> This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. The compression codec and other options are determined from Hadoop config variables mapred.output.compress* </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.parallel</name>
    - <value>false</value>
    - <description>Whether to execute jobs in parallel</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.parallel.thread.number</name>
    - <value>8</value>
    - <description>How many jobs at most can be executed in parallel</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.rowoffset</name>
    - <value>false</value>
    - <description>Whether to provide the row offset virtual column</description>
    -</property>
    -
    -<property>
    - <name>hive.counters.group.name</name>
    - <value>HIVE</value>
    - <description>The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)</description>
    -</property>
    -
    -<property>
    - <name>hive.hwi.war.file</name>
    - <value>lib/hive-hwi-@VERSION@.war</value>
    - <description>This sets the path to the HWI war file, relative to ${HIVE_HOME}. </description>
    -</property>
    -
    -<property>
    - <name>hive.hwi.listen.host</name>
    - <value>0.0.0.0</value>
    - <description>This is the host address the Hive Web Interface will listen on</description>
    -</property>
    -
    -<property>
    - <name>hive.hwi.listen.port</name>
    - <value>9999</value>
    - <description>This is the port the Hive Web Interface will listen on</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.pre.hooks</name>
    - <value></value>
    - <description>Comma-separated list of pre-execution hooks to be invoked for each statement. A pre-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.post.hooks</name>
    - <value></value>
    - <description>Comma-separated list of post-execution hooks to be invoked for each statement. A post-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.failure.hooks</name>
    - <value></value>
    - <description>Comma-separated list of on-failure hooks to be invoked for each statement. An on-failure hook is specified as the name of Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.init.hooks</name>
    - <value></value>
    - <description>A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener.</description>
    -</property>
    -
    -<property>
    - <name>hive.client.stats.publishers</name>
    - <value></value>
    - <description>Comma-separated list of statistics publishers to be invoked on counters on each job. A client stats publisher is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface.</description>
    -</property>
    -
    -<property>
    - <name>hive.client.stats.counters</name>
    - <value></value>
    - <description>Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). Non-display names should be used</description>
    -</property>
    -
    -<property>
    - <name>hive.merge.mapfiles</name>
    - <value>true</value>
    - <description>Merge small files at the end of a map-only job</description>
    -</property>
    -
    -<property>
    - <name>hive.merge.mapredfiles</name>
    - <value>false</value>
    - <description>Merge small files at the end of a map-reduce job</description>
    -</property>
    -
    -<property>
    - <name>hive.heartbeat.interval</name>
    - <value>1000</value>
    - <description>Send a heartbeat after this interval - used by mapjoin and filter operators</description>
    -</property>
    -
    -<property>
    - <name>hive.merge.size.per.task</name>
    - <value>256000000</value>
    - <description>Size of merged files at the end of the job</description>
    -</property>
    -
    -<property>
    - <name>hive.merge.smallfiles.avgsize</name>
    - <value>16000000</value>
    - <description>When the average output file size of a job is less than this number, Hive will start an additional map-reduce job to merge the output files into bigger files. This is only done for map-only jobs if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true.</description>
    -</property>
    -
    -<property>
    - <name>hive.mapjoin.smalltable.filesize</name>
    - <value>25000000</value>
    - <description>The threshold for the input file size of the small tables; if the file size is smaller than this threshold, it will try to convert the common join into map join</description>
    -</property>
    -
    -<property>
    - <name>hive.ignore.mapjoin.hint</name>
    - <value>true</value>
    - <description>Ignore the mapjoin hint</description>
    -</property>
    -
    -<property>
    - <name>hive.mapjoin.localtask.max.memory.usage</name>
    - <value>0.90</value>
    - <description>This number means how much memory the local task can use to hold key/value pairs in an in-memory hash table. If the local task's memory usage is more than this number, the local task will abort by itself. It means the data of the small table is too large to be held in memory.</description>
    -</property>
    -
    -<property>
    - <name>hive.mapjoin.followby.gby.localtask.max.memory.usage</name>
    - <value>0.55</value>
    - <description>This number means how much memory the local task can use to hold key/value pairs in an in-memory hash table when this map join is followed by a group by. If the local task's memory usage is more than this number, the local task will abort by itself. It means the data of the small table is too large to be held in memory.</description>
    -</property>
    -
    -<property>
    - <name>hive.mapjoin.check.memory.rows</name>
    - <value>100000</value>
    - <description>The number of rows to process before checking the memory usage</description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.join</name>
    - <value>false</value>
    - <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file size</description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.join.noconditionaltask</name>
    - <value>true</value>
    - <description>Whether Hive enables the optimization about converting common join into mapjoin based on the input file
    - size. If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the
    - specified size, the join is directly converted to a mapjoin (there is no conditional task).
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.join.noconditionaltask.size</name>
    - <value>10000000</value>
    - <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
    - is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly
    - converted to a mapjoin (there is no conditional task). The default is 10MB
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.join.use.nonstaged</name>
    - <value>true</value>
    - <description>For conditional joins, if input stream from a small alias can be directly applied to join operator without
    - filtering or projection, the alias need not be pre-staged in distributed cache via mapred local task.
    - Currently, this is not working with vectorization or tez execution engine.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.script.auto.progress</name>
    - <value>false</value>
    - <description>Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker to avoid the task getting killed because of inactivity. Hive sends progress information when the script is outputting to stderr. This option removes the need of periodically producing stderr messages, but users should be cautious because this may prevent the TaskTracker from killing scripts stuck in infinite loops. </description>
    -</property>
    -
    -<property>
    - <name>hive.script.serde</name>
    - <value>org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe</value>
    - <description>The default SerDe for transmitting input data to and reading output data from the user scripts. </description>
    -</property>
    -
    -<property>
    - <name>hive.binary.record.max.length</name>
    - <value>1000</value>
    - <description>Read from a binary stream and treat each hive.binary.record.max.length bytes as a record.
    - The last record before the end of stream can have less than hive.binary.record.max.length bytes</description>
    -</property>
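
Reading such a stream is straightforward: consume fixed-size chunks until EOF, accepting a short final record. A sketch in plain Java (the file path comes from the command line; this is not Hive code):

    import java.io.FileInputStream;
    import java.io.InputStream;

    public class FixedLengthRecords {
      public static void main(String[] args) throws Exception {
        int recordLength = 1000; // hive.binary.record.max.length
        byte[] record = new byte[recordLength];
        try (InputStream in = new FileInputStream(args[0])) {
          int read;
          // readNBytes blocks until recordLength bytes are read or EOF,
          // so only the last record can be shorter.
          while ((read = in.readNBytes(record, 0, recordLength)) > 0) {
            System.out.println("record of " + read + " bytes");
          }
        }
      }
    }
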
    -
    -<property>
    - <name>hive.server2.max.start.attempts</name>
    - <value>30</value>
    - <description>The number of times HiveServer2 will attempt to start before exiting, sleeping 60 seconds between retries. The default of 30 will keep trying for 30 minutes.</description>
    -</property>
    -
    -<property>
    - <name>hive.server2.transport.mode</name>
    - <value>binary</value>
    - <description>Server transport mode. "binary" or "http".</description>
    -</property>
    -
    -<property>
    - <name>hive.server2.thrift.http.port</name>
    - <value>10001</value>
    - <description>Port number when in HTTP mode.</description>
    -</property>
    -
    -<property>
    - <name>hive.server2.thrift.http.path</name>
    - <value>cliservice</value>
    - <description>Path component of URL endpoint when in HTTP mode.</description>
    -</property>
    -
    -<property>
    - <name>hive.server2.thrift.http.min.worker.threads</name>
    - <value>5</value>
    - <description>Minimum number of worker threads when in HTTP mode.</description>
    -</property>
    -
    -<property>
    - <name>hive.server2.thrift.http.max.worker.threads</name>
    - <value>500</value>
    - <description>Maximum number of worker threads when in HTTP mode.</description>
    -</property>
    -
    -<property>
    - <name>hive.script.recordreader</name>
    - <value>org.apache.hadoop.hive.ql.exec.TextRecordReader</value>
    - <description>The default record reader for reading data from the user scripts. </description>
    -</property>
    -
    -<property>
    - <name>stream.stderr.reporter.prefix</name>
    - <value>reporter:</value>
    - <description>Streaming jobs that log to standard error with this prefix can log counter or status information.</description>
    -</property>
    -
    -<property>
    - <name>stream.stderr.reporter.enabled</name>
    - <value>true</value>
    - <description>Enable consumption of status and counter messages for streaming jobs.</description>
    -</property>
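
These two properties follow the Hadoop Streaming convention: a script updates counters or status by writing specially prefixed lines to stderr. A sketch:

    public class CounterDemo {
      public static void main(String[] args) {
        // With stream.stderr.reporter.enabled=true, stderr lines starting with
        // the configured prefix ("reporter:") are parsed as counter/status updates.
        System.err.println("reporter:counter:MyGroup,RecordsSeen,1");
        System.err.println("reporter:status:processing input");
      }
    }
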
    -
    -<property>
    - <name>hive.script.recordwriter</name>
    - <value>org.apache.hadoop.hive.ql.exec.TextRecordWriter</value>
    - <description>The default record writer for writing data to the user scripts. </description>
    -</property>
    -
    -<property>
    - <name>hive.input.format</name>
    - <value>org.apache.hadoop.hive.ql.io.CombineHiveInputFormat</value>
    - <description>The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat.</description>
    -</property>
    -
    -<property>
    - <name>hive.udtf.auto.progress</name>
    - <value>false</value>
    - <description>Whether Hive should automatically send progress information to TaskTracker when using UDTFs to prevent the task getting killed because of inactivity. Users should be cautious because this may prevent TaskTracker from killing tasks with infinite loops. </description>
    -</property>
    -
    -<property>
    - <name>hive.mapred.reduce.tasks.speculative.execution</name>
    - <value>true</value>
    - <description>Whether speculative execution for reducers should be turned on. </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.counters.pull.interval</name>
    - <value>1000</value>
    - <description>The interval with which to poll the JobTracker for the counters of the running job. The smaller it is the more load there will be on the JobTracker; the higher it is the less granular the caught data will be.</description>
    -</property>
    -
    -<property>
    - <name>hive.querylog.location</name>
    - <value>/tmp/${user.name}</value>
    - <description>
    - Location of Hive run time structured log file
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.querylog.enable.plan.progress</name>
    - <value>true</value>
    - <description>
    - Whether to log the plan's progress every time a job's progress is checked.
    - These logs are written to the location specified by hive.querylog.location
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.querylog.plan.progress.interval</name>
    - <value>60000</value>
    - <description>
    - The interval to wait between logging the plan's progress in milliseconds.
    - If there is a whole number percentage change in the progress of the mappers or the reducers,
    - the progress is logged regardless of this value.
    - The actual interval will be the ceiling of (this value divided by the value of
    - hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval
    - i.e., if it does not divide evenly by the value of hive.exec.counters.pull.interval it will be
    - logged less frequently than specified.
    - This only has an effect if hive.querylog.enable.plan.progress is set to true.
    - </description>
    -</property>
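
The rounding rule in the description is a ceiling to the nearest multiple of hive.exec.counters.pull.interval; a worked example:

    public class PlanProgressInterval {
      public static void main(String[] args) {
        long pull = 1000;  // hive.exec.counters.pull.interval
        long plan = 1500;  // hypothetical hive.querylog.plan.progress.interval
        long effective = ((plan + pull - 1) / pull) * pull; // ceiling(1500/1000)*1000
        System.out.println(effective); // 2000: logged less often than requested
      }
    }
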
    -
    -<property>
    - <name>hive.enforce.bucketing</name>
    - <value>false</value>
    - <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced. </description>
    -</property>
    -
    -<property>
    - <name>hive.enforce.sorting</name>
    - <value>false</value>
    - <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced. </description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.bucketingsorting</name>
    - <value>true</value>
    - <description>If hive.enforce.bucketing or hive.enforce.sorting is true, don't create a reducer for enforcing
    - bucketing/sorting for queries of the form:
    - insert overwrite table T2 select * from T1;
    - where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.enforce.sortmergebucketmapjoin</name>
    - <value>false</value>
    - <description>If the user asked for sort-merge bucketed map-side join, and it cannot be performed,
    - should the query fail or not?
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.sortmerge.join</name>
    - <value>false</value>
    - <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
    - the criteria for sort-merge join.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.sortmerge.join.bigtable.selection.policy</name>
    - <value>org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ</value>
    - <description>The policy to choose the big table for automatic conversion to sort-merge join.
    - By default, the table with the largest partitions is assigned the big table. All policies are:
    - . based on position of the table - the leftmost table is selected
    - org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.
    - . based on total size (all the partitions selected in the query) of the table
    - org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.
    - . based on average size (all the partitions selected in the query) of the table
    - org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.
    - New policies can be added in future.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.auto.convert.sortmerge.join.to.mapjoin</name>
    - <value>false</value>
    - <description>If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join,
    - this parameter decides whether each table should be tried as a big table, and effectively a map-join should be
    - tried. That would create a conditional task with n+1 children for a n-way join (1 child for each table as the
    - big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a
    - sort-merge join, if there is no advantage of having the output bucketed and sorted. For example, if a very big sorted
    - and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table
    - with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster
    - if the complete small table can fit in memory, and a map-join can be performed.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.ds.connection.url.hook</name>
    - <value></value>
    - <description>Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used </description>
    -</property>
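
A connection URL hook is, again, just a class name; the following is a minimal sketch, assuming the org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook interface with the two methods shown (verify the interface against your Hive version; the class here is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.hooks.JDOConnectionURLHook;

    // Hypothetical hook that serves the JDO URL from an external source.
    public class StaticUrlHook implements JDOConnectionURLHook {
      @Override
      public String getJdoConnectionUrl(Configuration conf) throws Exception {
        // e.g. look the URL up in ZooKeeper or a service registry
        return "jdbc:derby:;databaseName=metastore_db;create=true";
      }

      @Override
      public void notifyBadConnectionUrl(String url) {
        System.err.println("bad metastore JDO URL: " + url);
      }
    }
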
    -
    -<property>
    - <name>hive.metastore.ds.retry.attempts</name>
    - <value>1</value>
    - <description>The number of times to retry a metastore call if there was a connection error</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.ds.retry.interval</name>
    - <value>1000</value>
    - <description>The number of milliseconds between metastore retry attempts</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.server.min.threads</name>
    - <value>200</value>
    - <description>Minimum number of worker threads in the Thrift server's pool.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.server.max.threads</name>
    - <value>100000</value>
    - <description>Maximum number of worker threads in the Thrift server's pool.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.server.tcp.keepalive</name>
    - <value>true</value>
    - <description>Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.sasl.enabled</name>
    - <value>false</value>
    - <description>If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.thrift.framed.transport.enabled</name>
    - <value>false</value>
    - <description>If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.kerberos.keytab.file</name>
    - <value></value>
    - <description>The path to the Kerberos Keytab file containing the metastore Thrift server's service principal.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.kerberos.principal</name>
    - <value>hive-metastore/_HOST@EXAMPLE.COM</value>
    - <description>The service principal for the metastore Thrift server. The special string _HOST will be replaced automatically with the correct host name.</description>
    -</property>
    -
    -<property>
    - <name>hive.cluster.delegation.token.store.class</name>
    - <value>org.apache.hadoop.hive.thrift.MemoryTokenStore</value>
    - <description>The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster.</description>
    -</property>
    -
    -<property>
    - <name>hive.cluster.delegation.token.store.zookeeper.connectString</name>
    - <value>localhost:2181</value>
    - <description>The ZooKeeper token store connect string.</description>
    -</property>
    -
    -<property>
    - <name>hive.cluster.delegation.token.store.zookeeper.znode</name>
    - <value>/hive/cluster/delegation</value>
    - <description>The root path for token store data.</description>
    -</property>
    -
    -<property>
    - <name>hive.cluster.delegation.token.store.zookeeper.acl</name>
    - <value>sasl:hive/host1@EXAMPLE.COM:cdrwa,sasl:hive/host2@EXAMPLE.COM:cdrwa</value>
     - <description>ACL for token store entries. Comma-separated list of all server principals for the cluster.</description>
    -</property>
    -
    -<property>
    - <name>hive.metastore.cache.pinobjtypes</name>
    - <value>Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order</value>
     - <description>Comma-separated list of metastore object types that should be pinned in the cache</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.reducededuplication</name>
    - <value>true</value>
     - <description>Remove extra map-reduce jobs if the data is already clustered by the same key that needs to be used again. This should always be set to true. Since it is a new feature, it has been made configurable.</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.correlation</name>
    - <value>false</value>
     - <description>Whether to exploit intra-query correlations.</description>
    -</property>
    -
    -<property>
    - <name>hive.optimize.reducededuplication.min.reducer</name>
    - <value>4</value>
     - <description>Reduce deduplication merges two reduce sinks (RSs) by moving the key/parts/reducer-num of the child RS to the parent RS.
     - If the reducer-num of the child RS is fixed (order by or forced bucketing) and small, this can produce a very slow, single-reducer MR job.
     - The optimization is disabled if the number of reducers is less than the specified value.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.dynamic.partition</name>
    - <value>true</value>
    - <description>Whether or not to allow dynamic partitions in DML/DDL.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.dynamic.partition.mode</name>
    - <value>strict</value>
    - <description>In strict mode, the user must specify at least one static partition in case the user accidentally overwrites all partitions.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.max.dynamic.partitions</name>
    - <value>1000</value>
    - <description>Maximum number of dynamic partitions allowed to be created in total.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.max.dynamic.partitions.pernode</name>
    - <value>100</value>
    - <description>Maximum number of dynamic partitions allowed to be created in each mapper/reducer node.</description>
    -</property>
    -
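As a rough sketch of how the dynamic-partition settings above interact, the snippet below switches a configuration to nonstrict mode and sets the creation limits. The class is hypothetical; the equivalent session-level HiveQL SET commands appear as comments:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class DynamicPartitionSetup {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // SET hive.exec.dynamic.partition=true;
            conf.set("hive.exec.dynamic.partition", "true");
            // SET hive.exec.dynamic.partition.mode=nonstrict;  (strict requires at least one static partition)
            conf.set("hive.exec.dynamic.partition.mode", "nonstrict");
            // Caps on partition creation: in total, and per mapper/reducer node.
            conf.set("hive.exec.max.dynamic.partitions", "1000");
            conf.set("hive.exec.max.dynamic.partitions.pernode", "100");
            System.out.println(conf.get("hive.exec.dynamic.partition.mode"));
        }
    }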
    -<property>
    - <name>hive.exec.max.created.files</name>
    - <value>100000</value>
    - <description>Maximum number of HDFS files created by all mappers/reducers in a MapReduce job.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.default.partition.name</name>
    - <value>__HIVE_DEFAULT_PARTITION__</value>
     - <description>The default partition name in case the dynamic partition column value is null/empty string or any other value that cannot be escaped. This value must not contain any special character used in HDFS URIs (e.g., ':', '%', '/' etc.). The user has to be aware that the dynamic partition value should not contain this value, to avoid confusion.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.dbclass</name>
    - <value>counter</value>
    - <description>The storage that stores temporary Hive statistics. Currently, jdbc, hbase, counter and custom type are supported.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.autogather</name>
    - <value>true</value>
    - <description>A flag to gather statistics automatically during the INSERT OVERWRITE command.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.jdbcdriver</name>
    - <value>org.apache.derby.jdbc.EmbeddedDriver</value>
    - <description>The JDBC driver for the database that stores temporary Hive statistics.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.dbconnectionstring</name>
    - <value>jdbc:derby:;databaseName=TempStatsStore;create=true</value>
    - <description>The default connection string for the database that stores temporary Hive statistics.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.default.publisher</name>
    - <value></value>
    - <description>The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.default.aggregator</name>
    - <value></value>
    - <description>The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.jdbc.timeout</name>
    - <value>30</value>
    - <description>Timeout value (number of seconds) used by JDBC connection and statements.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.retries.max</name>
    - <value>0</value>
     - <description>Maximum number of retries when the stats publisher/aggregator gets an exception while updating the intermediate database. The default is no retries on failure.</description>
    -</property>
    -
    -<property>
    - <name>hive.stats.retries.wait</name>
    - <value>3000</value>
     - <description>The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated by baseWindow * failures + baseWindow * (failures + 1) * (random number between [0.0,1.0]).</description>
    -</property>
    -
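A worked sketch of the backoff formula above (wait = baseWindow * failures + baseWindow * (failures + 1) * r, with r drawn from [0.0,1.0)); the class is a hypothetical illustration, not Hive's retry code:

    import java.util.Random;

    public class StatsRetryBackoffSketch {
        public static void main(String[] args) {
            long baseWindow = 3000L;  // hive.stats.retries.wait default, in milliseconds
            Random rand = new Random();
            for (int failures = 1; failures <= 3; failures++) {
                double r = rand.nextDouble();  // random number in [0.0, 1.0)
                long waitMs = (long) (baseWindow * failures + baseWindow * (failures + 1) * r);
                // For failures = 1 this lands between 3000 and 9000 ms.
                System.out.println("after failure " + failures + ": wait ~" + waitMs + " ms");
            }
        }
    }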
    -<property>
    - <name>hive.stats.reliable</name>
    - <value>false</value>
     - <description>Whether queries will fail if stats cannot be collected completely and accurately.
    - If this is set to true, reading/writing from/into a partition may fail because the stats
    - could not be computed accurately.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.collect.tablekeys</name>
    - <value>false</value>
    - <description>Whether join and group by keys on tables are derived and maintained in the QueryPlan.
    - This is useful to identify how tables are accessed and to determine if they should be bucketed.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.collect.scancols</name>
    - <value>false</value>
    - <description>Whether column accesses are tracked in the QueryPlan.
    - This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.ndv.error</name>
    - <value>20.0</value>
     - <description>Standard error expressed as a percentage. Provides a tradeoff between accuracy and compute cost. A lower error value indicates higher accuracy and a higher compute cost.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.key.prefix.max.length</name>
    - <value>200</value>
    - <description>
     - Determines whether, when the prefix of the key used for intermediate stats collection
     - exceeds a certain length, a hash of the key is used instead. If the value is &lt; 0, hashing
     - is never used; if the value is >= 0, hashing is used only when the key prefix's length
     - exceeds that value. The key prefix is defined as everything preceding the task ID in the key.
     - For counter-type stats, it is capped by mapreduce.job.counters.group.name.max, which defaults to 128.
    - </description>
    -</property>
    -
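A minimal sketch of the rule just described, assuming an MD5 hash stands in for an over-long prefix; the helper name is hypothetical and this is not Hive's implementation:

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    public class StatsKeyPrefixSketch {
        static String effectivePrefix(String prefix, int maxLength) throws Exception {
            // maxLength < 0 disables hashing entirely; otherwise hash only over-long prefixes.
            if (maxLength < 0 || prefix.length() <= maxLength) {
                return prefix;
            }
            MessageDigest md5 = MessageDigest.getInstance("MD5");
            StringBuilder hex = new StringBuilder();
            for (byte b : md5.digest(prefix.getBytes(StandardCharsets.UTF_8))) {
                hex.append(String.format("%02x", b));
            }
            return hex.toString();  // fixed 32-character stand-in for the long prefix
        }

        public static void main(String[] args) throws Exception {
            System.out.println(effectivePrefix("db/table/partition=1/", 200));  // short: unchanged
            StringBuilder longPrefix = new StringBuilder();
            for (int i = 0; i < 300; i++) longPrefix.append('x');
            System.out.println(effectivePrefix(longPrefix.toString(), 200));    // too long: hashed
        }
    }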
    -<property>
    - <name>hive.stats.key.prefix.reserve.length</name>
    - <value>24</value>
    - <description>
     - Reserved length for the postfix of the stats key. Currently only meaningful for the counter type,
     - which should keep the length of the full stats key smaller than the max length configured by
     - hive.stats.key.prefix.max.length. For the counter type, it should be bigger than the length of the LB spec, if one exists.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.max.variable.length</name>
    - <value>100</value>
    - <description>
     - To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),
    - average row size is multiplied with the total number of rows coming out of each operator.
    - Average row size is computed from average column size of all columns in the row. In the absence
    - of column statistics, for variable length columns (like string, bytes etc.), this value will be
    - used. For fixed length columns their corresponding Java equivalent sizes are used
    - (float - 4 bytes, double - 8 bytes etc.).
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.list.num.entries</name>
    - <value>10</value>
    - <description>
     - To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),
    - average row size is multiplied with the total number of rows coming out of each operator.
    - Average row size is computed from average column size of all columns in the row. In the absence
    - of column statistics and for variable length complex columns like list, the average number of
    - entries/values can be specified using this config.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.map.num.entries</name>
    - <value>10</value>
    - <description>
     - To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),
    - average row size is multiplied with the total number of rows coming out of each operator.
    - Average row size is computed from average column size of all columns in the row. In the absence
    - of column statistics and for variable length complex columns like map, the average number of
    - entries/values can be specified using this config.
    - </description>
    -</property>
    -
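To make the estimation rule in the last three properties concrete, here is a small worked example under the defaults above (100 bytes per variable-length column, 10 entries per list); the column mix and row count are illustrative assumptions:

    public class RowSizeEstimateSketch {
        public static void main(String[] args) {
            int maxVarLen = 100;    // hive.stats.max.variable.length
            int listEntries = 10;   // hive.stats.list.num.entries
            long doubleCol = 8;                        // fixed-length: double -> 8 bytes
            long stringCol = maxVarLen;                // variable-length: falls back to the config
            long listOfFloatsCol = listEntries * 4L;   // list: assumed entries * float size (4 bytes)
            long avgRowSize = doubleCol + stringCol + listOfFloatsCol;
            long numRows = 1_000_000L;
            System.out.println("avg row size = " + avgRowSize + " bytes");
            System.out.println("estimated data size = " + (avgRowSize * numRows) + " bytes");
        }
    }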
    -<property>
    - <name>hive.stats.map.parallelism</name>
    - <value>1</value>
    - <description>
     - Hive/Tez optimizer estimates the data size flowing through each of the operators.
     - For the GROUPBY operator, map-side parallelism needs to be known to accurately compute
     - the data size. By default, this value is set to 1, since the optimizer is not aware of the
     - number of mappers at compile time. This Hive config can be used to specify the number of
     - mappers to be used for the data size computation of the GROUPBY operator.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.fetch.column.stats</name>
    - <value>false</value>
    - <description>
     - Annotation of the operator tree with statistics information requires column statistics.
    - Column statistics are fetched from metastore. Fetching column statistics for each needed column
    - can be expensive when the number of columns is high. This flag can be used to disable fetching
    - of column statistics from metastore.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.fetch.partition.stats</name>
    - <value>true</value>
    - <description>
     - Annotation of the operator tree with statistics information requires partition-level basic
     - statistics like number of rows, data size and file size. Partition statistics are fetched from
    - metastore. Fetching partition statistics for each needed partition can be expensive when the
    - number of partitions is high. This flag can be used to disable fetching of partition statistics
    - from metastore. When this flag is disabled, Hive will make calls to filesystem to get file sizes
    - and will estimate the number of rows from row schema.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.join.factor</name>
    - <value>1.1</value>
    - <description>
     - Hive/Tez optimizer estimates the data size flowing through each of the operators. The JOIN operator
     - uses column statistics to estimate the number of rows flowing out of it, and hence the data size.
     - In the absence of column statistics, this factor determines the number of rows that flow out
     - of the JOIN operator.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.stats.deserialization.factor</name>
    - <value>1.0</value>
    - <description>
     - Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence
     - of basic statistics like number of rows and data size, file size is used to estimate the number
     - of rows and data size. Since files in tables/partitions are serialized (and optionally
     - compressed), the number of rows and data size cannot be reliably determined from the file size alone.
     - This factor is multiplied with the file size to account for serialization and compression.
    - </description>
    -</property>
    -
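A minimal sketch of the file-size fallback described above: data size is taken as file size times hive.stats.deserialization.factor, and the row count follows from an assumed average row size. All numbers are illustrative:

    public class FileSizeFallbackSketch {
        public static void main(String[] args) {
            long fileSizeBytes = 64L * 1024 * 1024;  // on-disk (possibly compressed) file size
            double deserFactor = 1.0;                // hive.stats.deserialization.factor
            long avgRowSize = 148;                   // e.g. from the row-size sketch above
            long estDataSize = (long) (fileSizeBytes * deserFactor);
            long estRows = estDataSize / avgRowSize;
            System.out.println("estimated rows = " + estRows);
        }
    }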
    -<property>
    - <name>hive.support.concurrency</name>
    - <value>false</value>
    - <description>Whether Hive supports concurrency or not. A ZooKeeper instance must be up and running for the default Hive lock manager to support read-write locks.</description>
    -</property>
    -
    -<property>
    - <name>hive.lock.numretries</name>
    - <value>100</value>
     - <description>The number of times to retry acquiring all the locks</description>
    -</property>
    -
    -<property>
    - <name>hive.unlock.numretries</name>
    - <value>10</value>
     - <description>The number of times to retry a single unlock operation</description>
    -</property>
    -
    -<property>
    - <name>hive.lock.sleep.between.retries</name>
    - <value>60</value>
    - <description>The sleep time (in seconds) between various retries</description>
    -</property>
    -
    -<property>
    - <name>hive.zookeeper.quorum</name>
    - <value></value>
    - <description>The list of ZooKeeper servers to talk to. This is only needed for read/write locks.</description>
    -</property>
    -
    -<property>
    - <name>hive.zookeeper.client.port</name>
    - <value>2181</value>
    - <description>The port of ZooKeeper servers to talk to. This is only needed for read/write locks.</description>
    -</property>
    -
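Pulling the locking-related settings together, a hypothetical sketch of what the default ZooKeeper-backed lock manager needs, per the descriptions above; the host names are placeholders:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class LockingSetup {
        public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // Concurrency support requires a running ZooKeeper instance for read/write locks.
            conf.set("hive.support.concurrency", "true");
            conf.set("hive.zookeeper.quorum", "zk1.example.com,zk2.example.com,zk3.example.com");
            conf.set("hive.zookeeper.client.port", "2181");
            // Retry behavior for lock acquisition, per the properties above.
            conf.set("hive.lock.numretries", "100");
            conf.set("hive.lock.sleep.between.retries", "60");  // seconds
            System.out.println(conf.get("hive.zookeeper.quorum"));
        }
    }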
    -<property>
    - <name>hive.zookeeper.session.timeout</name>
    - <value>600000</value>
     - <description>The ZooKeeper client's session timeout (in milliseconds). If a heartbeat is not sent within the timeout, the client is disconnected and, as a result, all locks are released.</description>
    -</property>
    -
    -<property>
    - <name>hive.zookeeper.namespace</name>
    - <value>hive_zookeeper_namespace</value>
    - <description>The parent node under which all ZooKeeper nodes are created.</description>
    -</property>
    -
    -<property>
    - <name>hive.zookeeper.clean.extra.nodes</name>
    - <value>false</value>
    - <description>Clean extra nodes at the end of the session.</description>
    -</property>
    -
    -<property>
    - <name>fs.har.impl</name>
    - <value>org.apache.hadoop.hive.shims.HiveHarFileSystem</value>
     - <description>The implementation for accessing Hadoop Archives. Note that this is not applicable to Hadoop versions earlier than 0.20.</description>
    -</property>
    -
    -<property>
    - <name>hive.archive.enabled</name>
    - <value>false</value>
    - <description>Whether archiving operations are permitted</description>
    -</property>
    -
    -<property>
    - <name>hive.fetch.output.serde</name>
    - <value>org.apache.hadoop.hive.serde2.DelimitedJSONSerDe</value>
    - <description>The SerDe used by FetchTask to serialize the fetch output.</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.mode.local.auto</name>
    - <value>false</value>
     - <description>Let Hive determine whether to run in local mode automatically</description>
    -</property>
    -
    -<property>
    - <name>hive.exec.drop.ignorenonexistent</name>
    - <value>true</value>
    - <description>
    - Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.exec.show.job.failure.debug.info</name>
    - <value>true</value>
    - <description>
    - If a job fails, whether to provide a link in the CLI to the task with the
    - most failures, along with debugging hints if applicable.
    - </description>
    -</property>
    -
    -<property>
    - <name>hive.auto.progress.timeout</name>
    - <value>0</value>
    - <description>
     - How long to run the auto-progressor for the script/UDTF operators (in seconds).
     - Set to 0 to run it forever.
    - </description>
    -</property>
    -
    -<!-- HBase Storage Handler Parameters -->
    -
    -<property>
    - <name>hive.hbase.wal.enabled</name>
    - <value>true</value>
    - <description>Whether writes to HBase should be forced to the write-ahead log. Disabling this improves HBase write performance at the risk of lost writes in case of a crash.</description>
    -</property>
    -
    -<property>
    - <name>hive.table.parameters.default</name>
    - <value></value>
    - <description>Default property values for newly created tables</description>
    -</property>
    -
    -<property>
    - <name>hive.entity.separator</name>
    - <value>@</value>
    - <description>Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname</description>
    -</property>
    -
    -<property>
    - <name>hive.ddl.createtablelike.properties.whitelist</name>
    - <value></value>
     - <description>Table properties to copy over when executing a CREATE TABLE LIKE.</description>
    -</property>
    -
    -<property>
    - <name>hive.variable.substitute</name>
    - <value>true</value>
     - <description>This enables substitution using syntax like ${var}, ${system:var}, and ${env:var}.</description>
    -</property>
    -
    -<property>
    - <name>hive.variable.substitute.depth</name>
    - <value>40</value>
     - <description>The maximum number of replacements the substitution engine will do.</description>
    -</property>
    -
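A minimal sketch of depth-bounded substitution as described by these two properties: references are expanded until none remain or the depth limit trips, which guards against self-referential variables. This imitates the documented behavior only; it is not Hive's VariableSubstitution class:

    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class SubstituteSketch {
        private static final Pattern VAR = Pattern.compile("\\$\\{([^}]+)}");

        static String substitute(String expr, Map<String, String> vars, int maxDepth) {
            for (int depth = 0; depth < maxDepth; depth++) {
                Matcher m = VAR.matcher(expr);
                if (!m.find()) {
                    return expr;  // nothing left to expand
                }
                String value = vars.getOrDefault(m.group(1), "");
                expr = expr.substring(0, m.start()) + value + expr.substring(m.end());
            }
            throw new IllegalStateException("substitution depth exceeded");
        }

        public static void main(String[] args) {
            Map<String, String> vars = Map.of("a", "${b}", "b", "42");
            System.out.println(substitute("select ${a}", vars, 40));  // prints: select 42
        }
    }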
    -<property>

    [... 2628 lines stripped ...]
