Repository: hive
Updated Branches:
   refs/heads/spark 42216997f -> ab9b18dc8


HIVE-11170 : port parts of HIVE-11015 to master for ease of future merging (Sergey Shelukhin, reviewed by Vikram Dixit K)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d89a7d1e
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d89a7d1e
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d89a7d1e

Branch: refs/heads/spark
Commit: d89a7d1e7fe7fb51aeb514e4357ae149158b2a34
Parents: d314425
Author: Sergey Shelukhin <sershe@apache.org>
Authored: Thu Jul 9 17:50:32 2015 -0700
Committer: Sergey Shelukhin <sershe@apache.org>
Committed: Thu Jul 9 17:50:32 2015 -0700

----------------------------------------------------------------------
  .../hadoop/hive/ql/exec/FilterOperator.java | 3 +-
  .../hive/ql/exec/mr/ExecMapperContext.java | 10 +-
  .../ql/io/HiveContextAwareRecordReader.java | 2 +-
  .../org/apache/hadoop/hive/ql/io/IOContext.java | 43 ------
  .../apache/hadoop/hive/ql/io/IOContextMap.java | 81 +++++++++++
  .../hadoop/hive/ql/exec/TestOperators.java | 3 +-
  .../ql/io/TestHiveBinarySearchRecordReader.java | 2 +-
  .../hadoop/hive/ql/io/TestIOContextMap.java | 133 +++++++++++++++++++
  8 files changed, 223 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
index 65301c0..ae35766 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java
@@ -25,6 +25,7 @@ import java.util.concurrent.Future;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.ql.io.IOContext;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
  import org.apache.hadoop.hive.ql.metadata.HiveException;
  import org.apache.hadoop.hive.ql.plan.FilterDesc;
  import org.apache.hadoop.hive.ql.plan.api.OperatorType;
@@ -61,7 +62,7 @@ public class FilterOperator extends Operator<FilterDesc> implements
        }

        conditionInspector = null;
- ioContext = IOContext.get(hconf);
+ ioContext = IOContextMap.get(hconf);
      } catch (Throwable e) {
        throw new HiveException(e);
      }

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
index 13d0650..fc5abfe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapperContext.java
@@ -22,8 +22,8 @@ import java.util.Map;
  import org.apache.commons.logging.Log;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.ql.exec.FetchOperator;
-import org.apache.hadoop.hive.ql.exec.Utilities;
  import org.apache.hadoop.hive.ql.io.IOContext;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
  import org.apache.hadoop.hive.ql.plan.MapredLocalWork;
  import org.apache.hadoop.mapred.JobConf;

@@ -63,11 +63,11 @@ public class ExecMapperContext {

    public ExecMapperContext(JobConf jc) {
      this.jc = jc;
- ioCxt = IOContext.get(jc);
+ ioCxt = IOContextMap.get(jc);
    }

    public void clear() {
- IOContext.clear();
+ IOContextMap.clear();
      ioCxt = null;
    }

@@ -151,8 +151,4 @@ public class ExecMapperContext {
    public IOContext getIoCxt() {
      return ioCxt;
    }
-
- public void setIoCxt(IOContext ioCxt) {
- this.ioCxt = ioCxt;
- }
  }

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
index 9b3f8ec..738ca9c 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java
@@ -162,7 +162,7 @@ public abstract class HiveContextAwareRecordReader<K, V> implements RecordReader
    }

    public IOContext getIOContext() {
- return IOContext.get(jobConf);
+ return IOContextMap.get(jobConf);
    }

    private void initIOContext(long startPos, boolean isBlockPointer,

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
index ebad0a6..019db8d 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java
@@ -18,13 +18,7 @@

  package org.apache.hadoop.hive.ql.io;

-import java.util.HashMap;
-import java.util.Map;
-
-import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.exec.Utilities;

  /**
   * IOContext basically contains the position information of the current
@@ -35,43 +29,6 @@ import org.apache.hadoop.hive.ql.exec.Utilities;
   * nextBlockStart refers the end of current row and beginning of next row.
   */
  public class IOContext {
-
- /**
- * Spark uses this thread local
- */
- private static final ThreadLocal<IOContext> threadLocal = new ThreadLocal<IOContext>(){
- @Override
- protected IOContext initialValue() { return new IOContext(); }
- };
-
- private static IOContext get() {
- return IOContext.threadLocal.get();
- }
-
- /**
- * Tez and MR use this map but are single threaded per JVM thus no synchronization is required.
- */
- private static final Map<String, IOContext> inputNameIOContextMap = new HashMap<String, IOContext>();
-
-
- public static IOContext get(Configuration conf) {
- if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
- return get();
- }
- String inputName = conf.get(Utilities.INPUT_NAME);
- if (!inputNameIOContextMap.containsKey(inputName)) {
- IOContext ioContext = new IOContext();
- inputNameIOContextMap.put(inputName, ioContext);
- }
-
- return inputNameIOContextMap.get(inputName);
- }
-
- public static void clear() {
- IOContext.threadLocal.remove();
- inputNameIOContextMap.clear();
- }
-
    private long currentBlockStart;
    private long nextBlockStart;
    private long currentRow;

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java
new file mode 100644
index 0000000..342c526
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContextMap.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+
+/**
+ * NOTE: before LLAP branch merge, there's no LLAP code here.
+ * There used to be a global static map of IOContext-s inside IOContext (Hive style!).
+ * Unfortunately, due to variety of factors, this is now a giant fustercluck.
+ * 1) Spark doesn't apparently care about multiple inputs, but has multiple threads, so one
+ * threadlocal IOContext was added for it.
+ * 2) LLAP has lots of tasks in the same process so globals no longer cut it either.
+ * 3) However, Tez runs 2+ threads for one task (e.g. TezTaskEventRouter and TezChild), and these
+ * surprisingly enough need the same context. Tez, in its infinite wisdom, doesn't allow them
+ * to communicate in any way nor provide any shared context.
+ * So we are going to...
+ * 1) Keep the good ol' global map for MR and Tez. Hive style!
+ * 2) Keep the threadlocal for Spark. Hive style!
+ * 3) Create inheritable (TADA!) threadlocal with attemptId, only set in LLAP; that will propagate
+ * to all the little Tez threads, and we will keep a map per attempt. Hive style squared!
+ */
+public class IOContextMap {
+ public static final String DEFAULT_CONTEXT = "";
+ private static final Log LOG = LogFactory.getLog(IOContextMap.class);
+
+ /** Used for Tez and MR */
+ private static final ConcurrentHashMap<String, IOContext> globalMap =
+ new ConcurrentHashMap<String, IOContext>();
+
+ /** Used for Spark */
+ private static final ThreadLocal<IOContext> sparkThreadLocal = new ThreadLocal<IOContext>(){
+ @Override
+ protected IOContext initialValue() { return new IOContext(); }
+ };
+
+ public static IOContext get(Configuration conf) {
+ if (HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("spark")) {
+ return sparkThreadLocal.get();
+ }
+ String inputName = conf.get(Utilities.INPUT_NAME);
+ if (inputName == null) {
+ inputName = DEFAULT_CONTEXT;
+ }
+ ConcurrentHashMap<String, IOContext> map;
+ map = globalMap;
+
+ IOContext ioContext = map.get(inputName);
+ if (ioContext != null) return ioContext;
+ ioContext = new IOContext();
+ IOContext oldContext = map.putIfAbsent(inputName, ioContext);
+ return (oldContext == null) ? ioContext : oldContext;
+ }
+
+ public static void clear() {
+ sparkThreadLocal.remove();
+ globalMap.clear();
+ }
+}
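
For reference, a minimal usage sketch (not part of the patch) showing how a caller picks up its context through the new class. IOContextMap, IOContext and Utilities.INPUT_NAME are the names from the diff above; the demo class and input name are made up:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.ql.exec.Utilities;
  import org.apache.hadoop.hive.ql.io.IOContext;
  import org.apache.hadoop.hive.ql.io.IOContextMap;

  public class IOContextLookupDemo {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // For MR and Tez the lookup is keyed by the input name: the same name
      // always yields the same IOContext, different names yield different ones.
      conf.set(Utilities.INPUT_NAME, "demo-input"); // hypothetical input name
      IOContext ctx = IOContextMap.get(conf);
      System.out.println(ctx == IOContextMap.get(conf)); // true
      // With hive.execution.engine=spark the input name is ignored and each
      // thread gets its own context from the Spark thread local instead.
    }
  }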

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
index 62057d8..c3a36c0 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hive.conf.HiveConf;
  import org.apache.hadoop.hive.ql.Driver;
  import org.apache.hadoop.hive.ql.io.IOContext;
+import org.apache.hadoop.hive.ql.io.IOContextMap;
  import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory;
  import org.apache.hadoop.hive.ql.plan.CollectDesc;
  import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -272,7 +273,7 @@ public class TestOperators extends TestCase {
        JobConf hconf = new JobConf(TestOperators.class);
        HiveConf.setVar(hconf, HiveConf.ConfVars.HADOOPMAPFILENAME,
            "hdfs:///testDir/testFile");
- IOContext.get(hconf).setInputPath(
+ IOContextMap.get(hconf).setInputPath(
            new Path("hdfs:///testDir/testFile"));

        // initialize pathToAliases

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
index 7a1748c..9dc4f5b 100644
--- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java
@@ -116,7 +116,7 @@ public class TestHiveBinarySearchRecordReader extends TestCase {

    private void resetIOContext() {
      conf.set(Utilities.INPUT_NAME, "TestHiveBinarySearchRecordReader");
- ioContext = IOContext.get(conf);
+ ioContext = IOContextMap.get(conf);
      ioContext.setUseSorted(false);
      ioContext.setBinarySearching(false);
      ioContext.setEndBinarySearch(false);

http://git-wip-us.apache.org/repos/asf/hive/blob/d89a7d1e/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java
----------------------------------------------------------------------
diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java
new file mode 100644
index 0000000..4469353
--- /dev/null
+++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestIOContextMap.java
@@ -0,0 +1,133 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import static org.junit.Assert.*;
+
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.FutureTask;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.junit.Test;
+
+import com.google.common.collect.Sets;
+
+public class TestIOContextMap {
+
+ private void syncThreadStart(final CountDownLatch cdlIn, final CountDownLatch cdlOut) {
+ cdlIn.countDown();
+ try {
+ cdlOut.await();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ @Test
+ public void testMRTezGlobalMap() throws Exception {
+ // Tests concurrent modification, and that results are the same per input across threads
+ // but different between inputs.
+ final int THREAD_COUNT = 2, ITER_COUNT = 1000;
+ final AtomicInteger countdown = new AtomicInteger(ITER_COUNT);
+ final CountDownLatch phase1End = new CountDownLatch(THREAD_COUNT);
+ final IOContext[] results = new IOContext[ITER_COUNT];
+ ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
+ final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
+
+ @SuppressWarnings("unchecked")
+ FutureTask<Void>[] tasks = new FutureTask[THREAD_COUNT];
+ for (int i = 0; i < tasks.length; ++i) {
+ tasks[i] = new FutureTask<Void>(new Callable<Void>() {
+ public Void call() throws Exception {
+ Configuration conf = new Configuration();
+ syncThreadStart(cdlIn, cdlOut);
+ // Phase 1 - create objects.
+ while (true) {
+ int nextIx = countdown.decrementAndGet();
+ if (nextIx < 0) break;
+ conf.set(Utilities.INPUT_NAME, "Input " + nextIx);
+ results[nextIx] = IOContextMap.get(conf);
+ if (nextIx == 0) break;
+ }
+ phase1End.countDown();
+ phase1End.await();
+ // Phase 2 - verify we get the expected objects created by all threads.
+ for (int i = 0; i < ITER_COUNT; ++i) {
+ conf.set(Utilities.INPUT_NAME, "Input " + i);
+ IOContext ctx = IOContextMap.get(conf);
+ assertSame(results[i], ctx);
+ }
+ return null;
+ }
+ });
+ executor.execute(tasks[i]);
+ }
+
+ cdlIn.await(); // Wait for all threads to be ready.
+ cdlOut.countDown(); // Release them at the same time.
+ for (int i = 0; i < tasks.length; ++i) {
+ tasks[i].get();
+ }
+ Set<IOContext> resultSet = Sets.newIdentityHashSet();
+ for (int i = 0; i < results.length; ++i) {
+ assertTrue(resultSet.add(results[i])); // All the objects must be different.
+ }
+ }
+
+ @Test
+ public void testSparkThreadLocal() throws Exception {
+ // Test that input name does not change IOContext returned, and that each thread gets its own.
+ final Configuration conf1 = new Configuration();
+ conf1.set(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, "spark");
+ final Configuration conf2 = new Configuration(conf1);
+ conf2.set(Utilities.INPUT_NAME, "Other input");
+ final int THREAD_COUNT = 2;
+ ExecutorService executor = Executors.newFixedThreadPool(THREAD_COUNT);
+ final CountDownLatch cdlIn = new CountDownLatch(THREAD_COUNT), cdlOut = new CountDownLatch(1);
+ @SuppressWarnings("unchecked")
+ FutureTask<IOContext>[] tasks = new FutureTask[THREAD_COUNT];
+ for (int i = 0; i < tasks.length; ++i) {
+ tasks[i] = new FutureTask<IOContext>(new Callable<IOContext>() {
+ public IOContext call() throws Exception {
+ syncThreadStart(cdlIn, cdlOut);
+ IOContext c1 = IOContextMap.get(conf1), c2 = IOContextMap.get(conf2);
+ assertSame(c1, c2);
+ return c1;
+ }
+ });
+ executor.execute(tasks[i]);
+ }
+
+ cdlIn.await(); // Wait for all threads to be ready.
+ cdlOut.countDown(); // Release them at the same time.
+ Set<IOContext> results = Sets.newIdentityHashSet();
+ for (int i = 0; i < tasks.length; ++i) {
+ assertTrue(results.add(tasks[i].get())); // All the objects must be different.
+ }
+ }
+
+}


  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11197 : While extracting join conditions follow Hive rules for type conversion instead of Calcite (Ashutosh Chauhan via Jesus Camacho Rodriguez)
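
    In practice the change swaps Calcite's leastRestrictive() for Hive's own comparison rules when the two join-key types differ, and surfaces the "no common type" case as a CalciteSemanticException instead of an internal error. A rough sketch of the rule now applied (FunctionRegistry and TypeInfoFactory are real Hive APIs; the demo class itself is illustrative only):

      import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

      public class CommonJoinKeyTypeDemo {
        public static void main(String[] args) {
          TypeInfo left = TypeInfoFactory.intTypeInfo;     // left join key: int
          TypeInfo right = TypeInfoFactory.stringTypeInfo; // right join key: string
          // Hive's rule for comparisons; may return null when no common type exists,
          // which the patched splitHiveJoinCondition reports as CalciteSemanticException.
          TypeInfo common = FunctionRegistry.getCommonClassForComparison(left, right);
          System.out.println("Common comparison type: " + common);
        }
      }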


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/20f2c29f
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/20f2c29f
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/20f2c29f

    Branch: refs/heads/spark
    Commit: 20f2c29f42725c0dd82acc5e3d170d7423003b47
    Parents: b61e6b5
    Author: Ashutosh Chauhan <hashutosh@apache.org>
    Authored: Fri Jul 10 08:40:00 2015 -0700
    Committer: Ashutosh Chauhan <hashutosh@apache.org>
    Committed: Fri Jul 10 08:40:00 2015 -0700

    ----------------------------------------------------------------------
      .../ql/optimizer/calcite/HiveCalciteUtil.java | 25 ++++----
      .../ql/optimizer/calcite/HiveRelOptUtil.java | 36 +++++------
      .../calcite/cost/HiveOnTezCostModel.java | 25 ++++++--
      .../calcite/reloperators/HiveJoin.java | 11 ++--
      .../calcite/reloperators/HiveMultiJoin.java | 13 +++-
      .../rules/HiveInsertExchange4JoinRule.java | 13 +++-
      .../calcite/rules/HiveJoinAddNotNullRule.java | 16 +++--
      .../calcite/rules/HiveJoinToMultiJoinRule.java | 64 +++++++-------------
      .../calcite/stats/HiveRelMdSelectivity.java | 11 ++--
      9 files changed, 120 insertions(+), 94 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
    index 024097e..0200506 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
    @@ -339,25 +339,25 @@ public class HiveCalciteUtil {
            return this.mapOfProjIndxInJoinSchemaToLeafPInfo;
          }

    - public static JoinPredicateInfo constructJoinPredicateInfo(Join j) {
    + public static JoinPredicateInfo constructJoinPredicateInfo(Join j) throws CalciteSemanticException {
            return constructJoinPredicateInfo(j, j.getCondition());
          }

    - public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj) {
    + public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj) throws CalciteSemanticException {
            return constructJoinPredicateInfo(mj, mj.getCondition());
          }

    - public static JoinPredicateInfo constructJoinPredicateInfo(Join j, RexNode predicate) {
    + public static JoinPredicateInfo constructJoinPredicateInfo(Join j, RexNode predicate) throws CalciteSemanticException {
            return constructJoinPredicateInfo(j.getInputs(), j.getSystemFieldList(), predicate);
          }

    - public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj, RexNode predicate) {
    + public static JoinPredicateInfo constructJoinPredicateInfo(HiveMultiJoin mj, RexNode predicate) throws CalciteSemanticException {
            final List<RelDataTypeField> systemFieldList = ImmutableList.of();
            return constructJoinPredicateInfo(mj.getInputs(), systemFieldList, predicate);
          }

          public static JoinPredicateInfo constructJoinPredicateInfo(List<RelNode> inputs,
    - List<RelDataTypeField> systemFieldList, RexNode predicate) {
    + List<RelDataTypeField> systemFieldList, RexNode predicate) throws CalciteSemanticException {
            JoinPredicateInfo jpi = null;
            JoinLeafPredicateInfo jlpi = null;
            List<JoinLeafPredicateInfo> equiLPIList = new ArrayList<JoinLeafPredicateInfo>();
    @@ -504,7 +504,7 @@ public class HiveCalciteUtil {
          // split accordingly. If the join condition is not part of the equi-join predicate,
          // the returned object will be typed as SQLKind.OTHER.
          private static JoinLeafPredicateInfo constructJoinLeafPredicateInfo(List<RelNode> inputs,
    - List<RelDataTypeField> systemFieldList, RexNode pe) {
    + List<RelDataTypeField> systemFieldList, RexNode pe) throws CalciteSemanticException {
            JoinLeafPredicateInfo jlpi = null;
            List<Integer> filterNulls = new ArrayList<Integer>();
            List<List<RexNode>> joinExprs = new ArrayList<List<RexNode>>();
    @@ -513,7 +513,7 @@ public class HiveCalciteUtil {
            }

            // 1. Split leaf join predicate to expressions from left, right
    - RexNode otherConditions = HiveRelOptUtil.splitJoinCondition(systemFieldList, inputs, pe,
    + RexNode otherConditions = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, inputs, pe,
                joinExprs, filterNulls, null);

            if (otherConditions.isAlwaysTrue()) {
    @@ -689,7 +689,7 @@ public class HiveCalciteUtil {
        public static ImmutableList<RexNode> getInputRef(List<Integer> inputRefs, RelNode inputRel) {
          ImmutableList.Builder<RexNode> bldr = ImmutableList.<RexNode> builder();
          for (int i : inputRefs) {
    - bldr.add(new RexInputRef(i, (RelDataType) inputRel.getRowType().getFieldList().get(i).getType()));
    + bldr.add(new RexInputRef(i, inputRel.getRowType().getFieldList().get(i).getType()));
          }
          return bldr.build();
        }
    @@ -697,7 +697,7 @@ public class HiveCalciteUtil {
        public static ExprNodeDesc getExprNode(Integer inputRefIndx, RelNode inputRel,
            ExprNodeConverter exprConv) {
          ExprNodeDesc exprNode = null;
    - RexNode rexInputRef = new RexInputRef(inputRefIndx, (RelDataType) inputRel.getRowType()
    + RexNode rexInputRef = new RexInputRef(inputRefIndx, inputRel.getRowType()
              .getFieldList().get(inputRefIndx).getType());
          exprNode = rexInputRef.accept(exprConv);

    @@ -723,9 +723,9 @@ public class HiveCalciteUtil {
          for (Integer iRef : inputRefs) {
            fieldNames.add(schemaNames.get(iRef));
          }
    -
    +
          return fieldNames;
    - }
    + }

        /**
         * Walks over an expression and determines whether it is constant.
    @@ -789,12 +789,13 @@ public class HiveCalciteUtil {

        private static class InputRefsCollector extends RexVisitorImpl<Void> {

    - private Set<Integer> inputRefSet = new HashSet<Integer>();
    + private final Set<Integer> inputRefSet = new HashSet<Integer>();

          private InputRefsCollector(boolean deep) {
            super(deep);
          }

    + @Override
          public Void visitInputRef(RexInputRef inputRef) {
            inputRefSet.add(inputRef.getIndex());
            return null;

    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
    index 9ebb24f..ab793f1 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptUtil.java
    @@ -7,7 +7,6 @@ import org.apache.calcite.plan.RelOptCluster;
      import org.apache.calcite.plan.RelOptUtil;
      import org.apache.calcite.rel.RelNode;
      import org.apache.calcite.rel.type.RelDataType;
    -import org.apache.calcite.rel.type.RelDataTypeFactory;
      import org.apache.calcite.rel.type.RelDataTypeField;
      import org.apache.calcite.rex.RexBuilder;
      import org.apache.calcite.rex.RexCall;
    @@ -17,11 +16,13 @@ import org.apache.calcite.sql.SqlKind;
      import org.apache.calcite.sql.SqlOperator;
      import org.apache.calcite.sql.fun.SqlStdOperatorTable;
      import org.apache.calcite.util.ImmutableBitSet;
    -import org.apache.calcite.util.Util;
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
    +import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter;
    +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    -import com.google.common.collect.ImmutableList;

      public class HiveRelOptUtil extends RelOptUtil {

    @@ -48,14 +49,15 @@ public class HiveRelOptUtil extends RelOptUtil {
         * join predicate are at the end of the key lists
         * returned
         * @return What's left, never null
    + * @throws CalciteSemanticException
         */
    - public static RexNode splitJoinCondition(
    + public static RexNode splitHiveJoinCondition(
            List<RelDataTypeField> sysFieldList,
            List<RelNode> inputs,
            RexNode condition,
            List<List<RexNode>> joinKeys,
            List<Integer> filterNulls,
    - List<SqlOperator> rangeOp) {
    + List<SqlOperator> rangeOp) throws CalciteSemanticException {
          final List<RexNode> nonEquiList = new ArrayList<>();

          splitJoinCondition(
    @@ -79,11 +81,10 @@ public class HiveRelOptUtil extends RelOptUtil {
            List<List<RexNode>> joinKeys,
            List<Integer> filterNulls,
            List<SqlOperator> rangeOp,
    - List<RexNode> nonEquiList) {
    + List<RexNode> nonEquiList) throws CalciteSemanticException {
          final int sysFieldCount = sysFieldList.size();
          final RelOptCluster cluster = inputs.get(0).getCluster();
          final RexBuilder rexBuilder = cluster.getRexBuilder();
    - final RelDataTypeFactory typeFactory = cluster.getTypeFactory();

          final ImmutableBitSet[] inputsRange = new ImmutableBitSet[inputs.size()];
          int totalFieldCount = 0;
    @@ -199,24 +200,25 @@ public class HiveRelOptUtil extends RelOptUtil {
                RelDataType rightKeyType = rightKey.getType();

                if (leftKeyType != rightKeyType) {
    - // perform casting
    - RelDataType targetKeyType =
    - typeFactory.leastRestrictive(
    - ImmutableList.of(leftKeyType, rightKeyType));
    + // perform casting using Hive rules
    + TypeInfo rType = TypeConverter.convert(rightKeyType);
    + TypeInfo lType = TypeConverter.convert(leftKeyType);
    + TypeInfo tgtType = FunctionRegistry.getCommonClassForComparison(lType, rType);

    - if (targetKeyType == null) {
    - throw Util.newInternal(
    + if (tgtType == null) {
    + throw new CalciteSemanticException(
                        "Cannot find common type for join keys "
    - + leftKey + " (type " + leftKeyType + ") and "
    - + rightKey + " (type " + rightKeyType + ")");
    + + leftKey + " (type " + leftKeyType + ") and "
    + + rightKey + " (type " + rightKeyType + ")");
                  }
    + RelDataType targetKeyType = TypeConverter.convert(tgtType, rexBuilder.getTypeFactory());

    - if (leftKeyType != targetKeyType) {
    + if (leftKeyType != targetKeyType && TypeInfoUtils.isConversionRequiredForComparison(tgtType, lType)) {
                    leftKey =
                        rexBuilder.makeCast(targetKeyType, leftKey);
                  }

    - if (rightKeyType != targetKeyType) {
    + if (rightKeyType != targetKeyType && TypeInfoUtils.isConversionRequiredForComparison(tgtType, rType)) {
                    rightKey =
                        rexBuilder.makeCast(targetKeyType, rightKey);
                  }

    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
    index fb67309..e9f1d96 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/cost/HiveOnTezCostModel.java
    @@ -29,7 +29,10 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery;
      import org.apache.calcite.util.ImmutableBitSet;
      import org.apache.calcite.util.ImmutableIntList;
      import org.apache.calcite.util.Pair;
    +import org.apache.commons.logging.Log;
    +import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.hive.conf.HiveConf;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate;
      import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
    @@ -48,6 +51,8 @@ public class HiveOnTezCostModel extends HiveCostModel {

        private static HiveAlgorithmsUtil algoUtils;

    + private static transient final Log LOG = LogFactory.getLog(HiveOnTezCostModel.class);
    +
        synchronized public static HiveOnTezCostModel getCostModel(HiveConf conf) {
          if (INSTANCE == null) {
            INSTANCE = new HiveOnTezCostModel(conf);
    @@ -136,7 +141,13 @@ public class HiveOnTezCostModel extends HiveCostModel {
                    add(leftRCount).
                    add(rightRCount).
                    build();
    - final double cpuCost = algoUtils.computeSortMergeCPUCost(cardinalities, join.getSortedInputs());
    + double cpuCost;
    + try {
    + cpuCost = algoUtils.computeSortMergeCPUCost(cardinalities, join.getSortedInputs());
    + } catch (CalciteSemanticException e) {
    + LOG.trace("Failed to compute sort merge cpu cost ", e);
    + return null;
    + }
            // 3. IO cost = cost of writing intermediary results to local FS +
            // cost of reading from local FS for transferring to join +
            // cost of transferring map outputs to Join operator
    @@ -183,7 +194,7 @@ public class HiveOnTezCostModel extends HiveCostModel {
            if (memoryWithinPhase == null || splitCount == null) {
              return null;
            }
    -
    +
            return memoryWithinPhase / splitCount;
          }

    @@ -289,7 +300,7 @@ public class HiveOnTezCostModel extends HiveCostModel {
            if (join.getStreamingSide() != MapJoinStreamingRelation.LEFT_RELATION
              && join.getStreamingSide() != MapJoinStreamingRelation.RIGHT_RELATION) {
              return null;
    - }
    + }
            return HiveAlgorithmsUtil.getJoinDistribution(join.getJoinPredicateInfo(),
                    join.getStreamingSide());
          }
    @@ -521,7 +532,13 @@ public class HiveOnTezCostModel extends HiveCostModel {
            for (int i=0; i<join.getInputs().size(); i++) {
              RelNode input = join.getInputs().get(i);
              // Is smbJoin possible? We need correct order
    - boolean orderFound = join.getSortedInputs().get(i);
    + boolean orderFound;
    + try {
    + orderFound = join.getSortedInputs().get(i);
    + } catch (CalciteSemanticException e) {
    + LOG.trace("Not possible to do SMB Join ",e);
    + return false;
    + }
              if (!orderFound) {
                return false;
              }

    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
    index 668960e..6814df6 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveJoin.java
    @@ -41,6 +41,7 @@ import org.apache.calcite.rel.type.RelDataTypeField;
      import org.apache.calcite.rex.RexNode;
      import org.apache.calcite.util.ImmutableBitSet;
      import org.apache.calcite.util.ImmutableIntList;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
    @@ -51,7 +52,7 @@ import com.google.common.collect.ImmutableList;

      //TODO: Should we convert MultiJoin to be a child of HiveJoin
      public class HiveJoin extends Join implements HiveRelNode {
    -
    +
        public static final JoinFactory HIVE_JOIN_FACTORY = new HiveJoinFactoryImpl();

        public enum MapJoinStreamingRelation {
    @@ -71,14 +72,14 @@ public class HiveJoin extends Join implements HiveRelNode {
            HiveJoin join = new HiveJoin(cluster, null, left, right, condition, joinType, variablesStopped,
                    DefaultJoinAlgorithm.INSTANCE, leftSemiJoin);
            return join;
    - } catch (InvalidRelException e) {
    + } catch (InvalidRelException | CalciteSemanticException e) {
            throw new RuntimeException(e);
          }
        }

        protected HiveJoin(RelOptCluster cluster, RelTraitSet traits, RelNode left, RelNode right,
            RexNode condition, JoinRelType joinType, Set<String> variablesStopped,
    - JoinAlgorithm joinAlgo, boolean leftSemiJoin) throws InvalidRelException {
    + JoinAlgorithm joinAlgo, boolean leftSemiJoin) throws InvalidRelException, CalciteSemanticException {
          super(cluster, TraitsUtil.getDefaultTraitSet(cluster), left, right, condition, joinType,
              variablesStopped);
          this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
    @@ -97,7 +98,7 @@ public class HiveJoin extends Join implements HiveRelNode {
            Set<String> variablesStopped = Collections.emptySet();
            return new HiveJoin(getCluster(), traitSet, left, right, conditionExpr, joinType,
                variablesStopped, joinAlgorithm, leftSemiJoin);
    - } catch (InvalidRelException e) {
    + } catch (InvalidRelException | CalciteSemanticException e) {
            // Semantic error not possible. Must be a bug. Convert to
            // internal error.
            throw new AssertionError(e);
    @@ -170,7 +171,7 @@ public class HiveJoin extends Join implements HiveRelNode {
          return smallInput;
        }

    - public ImmutableBitSet getSortedInputs() {
    + public ImmutableBitSet getSortedInputs() throws CalciteSemanticException {
          ImmutableBitSet.Builder sortedInputsBuilder = new ImmutableBitSet.Builder();
          JoinPredicateInfo joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
                  constructJoinPredicateInfo(this);

    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
    index 911ceda..7a43f29 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveMultiJoin.java
    @@ -31,6 +31,7 @@ import org.apache.calcite.rel.type.RelDataType;
      import org.apache.calcite.rex.RexNode;
      import org.apache.calcite.rex.RexShuttle;
      import org.apache.calcite.util.Pair;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.TraitsUtil;
    @@ -60,7 +61,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
         * @param inputs inputs into this multi-join
         * @param condition join filter applicable to this join node
         * @param rowType row type of the join result of this node
    - * @param joinInputs
    + * @param joinInputs
         * @param joinTypes the join type corresponding to each input; if
         * an input is null-generating in a left or right
         * outer join, the entry indicates the type of
    @@ -84,7 +85,11 @@ public final class HiveMultiJoin extends AbstractRelNode {
          this.joinTypes = ImmutableList.copyOf(joinTypes);
          this.outerJoin = containsOuter();

    - this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
    + try {
    + this.joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(this);
    + } catch (CalciteSemanticException e) {
    + throw new RuntimeException(e);
    + }
        }


    @@ -105,6 +110,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
              joinTypes);
        }

    + @Override
        public RelWriter explainTerms(RelWriter pw) {
          List<String> joinsString = new ArrayList<String>();
          for (int i = 0; i < joinInputs.size(); i++) {
    @@ -122,10 +128,12 @@ public final class HiveMultiJoin extends AbstractRelNode {
              .item("joinsDescription", joinsString);
        }

    + @Override
        public RelDataType deriveRowType() {
          return rowType;
        }

    + @Override
        public List<RelNode> getInputs() {
          return inputs;
        }
    @@ -134,6 +142,7 @@ public final class HiveMultiJoin extends AbstractRelNode {
          return ImmutableList.of(condition);
        }

    + @Override
        public RelNode accept(RexShuttle shuttle) {
          RexNode joinFilter = shuttle.apply(this.condition);


    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
    index c5ab055..39c69a4 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveInsertExchange4JoinRule.java
    @@ -32,6 +32,7 @@ import org.apache.calcite.rel.core.Join;
      import org.apache.calcite.rex.RexNode;
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
    @@ -75,10 +76,18 @@ public class HiveInsertExchange4JoinRule extends RelOptRule {
          JoinPredicateInfo joinPredInfo;
          if (call.rel(0) instanceof HiveMultiJoin) {
            HiveMultiJoin multiJoin = call.rel(0);
    - joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(multiJoin);
    + try {
    + joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(multiJoin);
    + } catch (CalciteSemanticException e) {
    + throw new RuntimeException(e);
    + }
          } else if (call.rel(0) instanceof Join) {
            Join join = call.rel(0);
    - joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
    + try {
    + joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
    + } catch (CalciteSemanticException e) {
    + throw new RuntimeException(e);
    + }
          } else {
            return;
          }

    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
    index a4484ec..c4a40bf 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinAddNotNullRule.java
    @@ -39,6 +39,7 @@ import org.apache.calcite.sql.SqlKind;
      import org.apache.calcite.sql.SqlOperator;
      import org.apache.calcite.sql.type.SqlTypeName;
      import org.apache.hadoop.hive.ql.exec.FunctionRegistry;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
    @@ -46,12 +47,13 @@ import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter;
      import org.apache.hadoop.hive.ql.optimizer.calcite.translator.SqlFunctionConverter;
      import org.apache.hadoop.hive.ql.parse.SemanticException;

    +import com.esotericsoftware.minlog.Log;
      import com.google.common.collect.ImmutableList;

      public final class HiveJoinAddNotNullRule extends RelOptRule {

        private static final String NOT_NULL_FUNC_NAME = "isnotnull";
    -
    +
        /** The singleton. */
        public static final HiveJoinAddNotNullRule INSTANCE =
            new HiveJoinAddNotNullRule(HiveFilter.DEFAULT_FILTER_FACTORY);
    @@ -72,6 +74,7 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {

        //~ Methods ----------------------------------------------------------------

    + @Override
        public void onMatch(RelOptRuleCall call) {
          final Join join = call.rel(0);
          RelNode leftInput = call.rel(1);
    @@ -85,8 +88,13 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {
            return;
          }

    - JoinPredicateInfo joinPredInfo =
    - HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
    + JoinPredicateInfo joinPredInfo;
    + try {
    + joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.constructJoinPredicateInfo(join);
    + } catch (CalciteSemanticException e) {
    + Log.trace("Failed to add is not null filter on join ", e);
    + return;
    + }

          Set<Integer> joinLeftKeyPositions = new HashSet<Integer>();
          Set<Integer> joinRightKeyPositions = new HashSet<Integer>();
    @@ -133,7 +141,7 @@ public final class HiveJoinAddNotNullRule extends RelOptRule {

          call.transformTo(newJoin);
        }
    -
    +
        private static Map<String,RexNode> getNotNullConditions(RelOptCluster cluster,
                RexBuilder rexBuilder, RelNode input, Set<Integer> inputKeyPositions) {


    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
    index c5e0e11..a0144f3 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveJoinToMultiJoinRule.java
    @@ -22,7 +22,6 @@ import java.util.List;

      import org.apache.calcite.plan.RelOptRule;
      import org.apache.calcite.plan.RelOptRuleCall;
    -import org.apache.calcite.plan.RelOptUtil;
      import org.apache.calcite.rel.RelNode;
      import org.apache.calcite.rel.core.Join;
      import org.apache.calcite.rel.core.JoinRelType;
    @@ -35,6 +34,9 @@ import org.apache.calcite.rex.RexNode;
      import org.apache.calcite.rex.RexUtil;
      import org.apache.calcite.util.ImmutableBitSet;
      import org.apache.calcite.util.Pair;
    +import org.apache.commons.logging.Log;
    +import org.apache.commons.logging.LogFactory;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelOptUtil;
    @@ -56,6 +58,7 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {

        private final ProjectFactory projectFactory;

    + private static transient final Log LOG = LogFactory.getLog(HiveJoinToMultiJoinRule.class);

        //~ Constructors -----------------------------------------------------------

    @@ -142,8 +145,14 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
              leftJoinTypes = hmj.getJoinTypes();
            }

    - boolean combinable = isCombinablePredicate(join, join.getCondition(),
    - leftCondition);
    + boolean combinable;
    + try {
    + combinable = isCombinablePredicate(join, join.getCondition(),
    + leftCondition);
    + } catch (CalciteSemanticException e) {
    + LOG.trace("Failed to merge joins", e);
    + combinable = false;
    + }
            if (combinable) {
              newJoinFilters.add(leftCondition);
              for (int i = 0; i < leftJoinInputs.size(); i++) {
    @@ -172,8 +181,14 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
          for (int i=0; i<newInputs.size(); i++) {
            joinKeyExprs.add(new ArrayList<RexNode>());
          }
    - RexNode otherCondition = HiveRelOptUtil.splitJoinCondition(systemFieldList, newInputs, join.getCondition(),
    - joinKeyExprs, filterNulls, null);
    + RexNode otherCondition;
    + try {
    + otherCondition = HiveRelOptUtil.splitHiveJoinCondition(systemFieldList, newInputs, join.getCondition(),
    + joinKeyExprs, filterNulls, null);
    + } catch (CalciteSemanticException e) {
    + LOG.trace("Failed to merge joins", e);
    + return null;
    + }
          // If there are remaining parts in the condition, we bail out
          if (!otherCondition.isAlwaysTrue()) {
            return null;
    @@ -221,7 +236,7 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
        }

        private static boolean isCombinablePredicate(Join join,
    - RexNode condition, RexNode otherCondition) {
    + RexNode condition, RexNode otherCondition) throws CalciteSemanticException {
          final JoinPredicateInfo joinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
                  constructJoinPredicateInfo(join, condition);
          final JoinPredicateInfo otherJoinPredInfo = HiveCalciteUtil.JoinPredicateInfo.
    @@ -236,41 +251,4 @@ public class HiveJoinToMultiJoinRule extends RelOptRule {
          }
          return true;
        }
    -
    - /**
    - * Shifts a filter originating from the right child of the LogicalJoin to the
    - * right, to reflect the filter now being applied on the resulting
    - * MultiJoin.
    - *
    - * @param joinRel the original LogicalJoin
    - * @param left the left child of the LogicalJoin
    - * @param right the right child of the LogicalJoin
    - * @param rightFilter the filter originating from the right child
    - * @return the adjusted right filter
    - */
    - private static RexNode shiftRightFilter(
    - Join joinRel,
    - RelNode left,
    - RelNode right,
    - RexNode rightFilter) {
    - if (rightFilter == null) {
    - return null;
    - }
    -
    - int nFieldsOnLeft = left.getRowType().getFieldList().size();
    - int nFieldsOnRight = right.getRowType().getFieldList().size();
    - int[] adjustments = new int[nFieldsOnRight];
    - for (int i = 0; i < nFieldsOnRight; i++) {
    - adjustments[i] = nFieldsOnLeft;
    - }
    - rightFilter =
    - rightFilter.accept(
    - new RelOptUtil.RexInputConverter(
    - joinRel.getCluster().getRexBuilder(),
    - right.getRowType().getFieldList(),
    - joinRel.getRowType().getFieldList(),
    - adjustments));
    - return rightFilter;
    - }
    -
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/20f2c29f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
    index 960ec40..715f24f 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/stats/HiveRelMdSelectivity.java
    @@ -32,6 +32,7 @@ import org.apache.calcite.rel.metadata.RelMetadataQuery;
      import org.apache.calcite.rex.RexNode;
      import org.apache.calcite.util.BuiltInMethod;
      import org.apache.calcite.util.Pair;
    +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinLeafPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil.JoinPredicateInfo;
      import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin;
    @@ -57,14 +58,14 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
          return 1.0;
        }

    - public Double getSelectivity(HiveJoin j, RexNode predicate) {
    + public Double getSelectivity(HiveJoin j, RexNode predicate) throws CalciteSemanticException {
          if (j.getJoinType().equals(JoinRelType.INNER)) {
            return computeInnerJoinSelectivity(j, predicate);
          }
          return 1.0;
        }

    - private Double computeInnerJoinSelectivity(HiveJoin j, RexNode predicate) {
    + private Double computeInnerJoinSelectivity(HiveJoin j, RexNode predicate) throws CalciteSemanticException {
          double ndvCrossProduct = 1;
          Pair<Boolean, RexNode> predInfo =
              getCombinedPredicateForJoin(j, predicate);
    @@ -183,7 +184,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {
        }

        /**
    - *
    + *
         * @param j
         * @param additionalPredicate
         * @return if predicate is the join condition return (true, joinCond)
    @@ -206,7 +207,7 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {

        /**
         * Compute Max NDV to determine Join Selectivity.
    - *
    + *
         * @param jlpi
         * @param colStatMap
         * Immutable Map of Projection Index (in Join Schema) to Column Stat
    @@ -238,5 +239,5 @@ public class HiveRelMdSelectivity extends RelMdSelectivity {

          return maxNDVSoFar;
        }
    -
    +
      }
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11190: No prompting info or warning provided when METASTORE_FILTER_HOOK in authorization V2 is overridden(Dapeng Sun, reviewed by Thejas M Nair and Ferdinand Xu)
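
    The behavioral change is a warning only: a customized metastore filter hook is still overridden when an authorization-V2 HiveAuthorizerFactory is configured, but Hive now logs that fact instead of replacing the value silently. A hypothetical setup that would trigger the new warning (the ConfVars name comes from the diff below; the custom hook class is made up):

      import org.apache.hadoop.hive.conf.HiveConf;

      public class FilterHookOverrideDemo {
        public static void main(String[] args) {
          HiveConf conf = new HiveConf();
          // Hypothetical user-supplied hook; any value other than the default or
          // AuthorizationMetaStoreFilterHook is warned about and then overridden
          // when SessionState applies the V2 authorization config policy.
          conf.setVar(HiveConf.ConfVars.METASTORE_FILTER_HOOK, "com.example.MyFilterHook");
          System.out.println(conf.getVar(HiveConf.ConfVars.METASTORE_FILTER_HOOK));
        }
      }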


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/ad1cb15a
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/ad1cb15a
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/ad1cb15a

    Branch: refs/heads/spark
    Commit: ad1cb15a8e35ebc1631996ffda7b4302276483bc
    Parents: e6ea691
    Author: Ferdinand Xu <cheng.a.xu@intel.com>
    Authored: Sun Jul 12 21:08:58 2015 -0400
    Committer: Ferdinand Xu <cheng.a.xu@intel.com>
    Committed: Sun Jul 12 21:09:31 2015 -0400

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/ql/session/SessionState.java | 10 +++++++++-
      1 file changed, 9 insertions(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/ad1cb15a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    index 0bc9a46..49d64db 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    @@ -74,6 +74,7 @@ import org.apache.hadoop.hive.ql.metadata.Table;
      import org.apache.hadoop.hive.ql.plan.HiveOperation;
      import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
      import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider;
    +import org.apache.hadoop.hive.ql.security.authorization.plugin.AuthorizationMetaStoreFilterHook;
      import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
      import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
      import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
    @@ -764,8 +765,15 @@ public class SessionState {
          if (conf.get(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, "").equals(Boolean.TRUE.toString())) {
            return;
          }
    + String metastoreHook = conf.get(ConfVars.METASTORE_FILTER_HOOK.name());
    + if (!ConfVars.METASTORE_FILTER_HOOK.getDefaultValue().equals(metastoreHook) &&
    + !AuthorizationMetaStoreFilterHook.class.getName().equals(metastoreHook)) {
    + LOG.warn(ConfVars.METASTORE_FILTER_HOOK.name() +
    + " will be ignored, since hive.security.authorization.manager" +
    + " is set to instance of HiveAuthorizerFactory.");
    + }
          conf.setVar(ConfVars.METASTORE_FILTER_HOOK,
    - "org.apache.hadoop.hive.ql.security.authorization.plugin.AuthorizationMetaStoreFilterHook");
    + AuthorizationMetaStoreFilterHook.class.getName());

          authorizerV2.applyAuthorizationConfigPolicy(conf);
          // update config in Hive thread local as well and init the metastore client
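
    A minimal standalone sketch of the behaviour added above may help readers who skip the hunk: warn when the user has pointed the metastore filter hook somewhere other than the default or the authorization-V2 hook, then force the V2 hook regardless. This is not Hive code; the literal config key and default hook class name are assumptions quoted from memory of HiveConf, and only the warn-then-override flow mirrors the patch.

    import java.util.HashMap;
    import java.util.Map;

    /** Hedged sketch of the warn-then-override flow; names marked below are assumptions. */
    public class FilterHookOverrideSketch {
      // Assumed key/default; the patch reads them from HiveConf.ConfVars.METASTORE_FILTER_HOOK.
      static final String KEY = "hive.metastore.filter.hook";
      static final String DEFAULT_HOOK =
          "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl";
      // Class name taken verbatim from the import added in the hunk above.
      static final String AUTHZ_V2_HOOK =
          "org.apache.hadoop.hive.ql.security.authorization.plugin.AuthorizationMetaStoreFilterHook";

      static void applyAuthzFilterHook(Map<String, String> conf) {
        String current = conf.getOrDefault(KEY, DEFAULT_HOOK);
        if (!DEFAULT_HOOK.equals(current) && !AUTHZ_V2_HOOK.equals(current)) {
          // Mirrors the new LOG.warn(): the user's hook would otherwise be ignored silently.
          System.err.println(KEY + " will be ignored, since hive.security.authorization.manager"
              + " is set to instance of HiveAuthorizerFactory.");
        }
        conf.put(KEY, AUTHZ_V2_HOOK); // authorization V2 always installs its own filter hook
      }

      public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put(KEY, "com.example.MyCustomFilterHook"); // hypothetical user override
        applyAuthzFilterHook(conf);
        System.out.println(KEY + " = " + conf.get(KEY));
      }
    }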
  • Sunchao at Jul 20, 2015 at 8:12 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
    index bebac54..11e5333 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker.java
    @@ -281,7 +281,7 @@ public class TestWorker extends CompactorTest {
          // Find the new delta file and make sure it has the right contents
          boolean sawNewDelta = false;
          for (int i = 0; i < stat.length; i++) {
    - if (stat[i].getPath().getName().equals("delta_0000021_0000024")) {
    + if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
              sawNewDelta = true;
              FileStatus[] buckets = fs.listStatus(stat[i].getPath());
              Assert.assertEquals(2, buckets.length);
    @@ -296,6 +296,10 @@ public class TestWorker extends CompactorTest {
          Assert.assertTrue(sawNewDelta);
        }

    + /**
    + * todo: fix https://issues.apache.org/jira/browse/HIVE-9995
    + * @throws Exception
    + */
        @Test
        public void minorWithOpenInMiddle() throws Exception {
          LOG.debug("Starting minorWithOpenInMiddle");
    @@ -321,15 +325,18 @@ public class TestWorker extends CompactorTest {
          // There should still now be 5 directories in the location
          FileSystem fs = FileSystem.get(conf);
          FileStatus[] stat = fs.listStatus(new Path(t.getSd().getLocation()));
    - Assert.assertEquals(5, stat.length);
    + boolean is130 = this instanceof TestWorker2;
    + Assert.assertEquals(is130 ? 5 : 4, stat.length);

          // Find the new delta file and make sure it has the right contents
          Arrays.sort(stat);
          Assert.assertEquals("base_20", stat[0].getPath().getName());
    - Assert.assertEquals("delta_0000021_0000022", stat[1].getPath().getName());
    - Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
    - Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
    - Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
    + if (is130) { // in 1.3.0 the original delta is delta_00021_00022_0000 and the compacted one is delta_00021_00022...
    + Assert.assertEquals(makeDeltaDirNameCompacted(21, 22), stat[1].getPath().getName());
    + }
    + Assert.assertEquals(makeDeltaDirName(21, 22), stat[1 + (is130 ? 1 : 0)].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(23, 25), stat[2 + (is130 ? 1 : 0)].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(26, 27), stat[3 + (is130 ? 1 : 0)].getPath().getName());
        }

        @Test
    @@ -362,10 +369,10 @@ public class TestWorker extends CompactorTest {
          // Find the new delta file and make sure it has the right contents
          Arrays.sort(stat);
          Assert.assertEquals("base_20", stat[0].getPath().getName());
    - Assert.assertEquals("delta_0000021_0000027", stat[1].getPath().getName());
    - Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
    - Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
    - Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(21, 22), stat[1].getPath().getName());
    + Assert.assertEquals(makeDeltaDirNameCompacted(21, 27), stat[2].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
        }

        @Test
    @@ -398,7 +405,7 @@ public class TestWorker extends CompactorTest {
          // Find the new delta file and make sure it has the right contents
          boolean sawNewDelta = false;
          for (int i = 0; i < stat.length; i++) {
    - if (stat[i].getPath().getName().equals("delta_0000021_0000024")) {
    + if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
              sawNewDelta = true;
              FileStatus[] buckets = fs.listStatus(stat[i].getPath());
              Assert.assertEquals(2, buckets.length);
    @@ -441,7 +448,7 @@ public class TestWorker extends CompactorTest {
          // Find the new delta file and make sure it has the right contents
          boolean sawNewDelta = false;
          for (int i = 0; i < stat.length; i++) {
    - if (stat[i].getPath().getName().equals("delta_0000001_0000004")) {
    + if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(1, 4))) {
              sawNewDelta = true;
              FileStatus[] buckets = fs.listStatus(stat[i].getPath());
              Assert.assertEquals(2, buckets.length);
    @@ -661,7 +668,7 @@ public class TestWorker extends CompactorTest {
          // Find the new delta file and make sure it has the right contents
          boolean sawNewDelta = false;
          for (int i = 0; i < stat.length; i++) {
    - if (stat[i].getPath().getName().equals("delta_0000021_0000024")) {
    + if (stat[i].getPath().getName().equals(makeDeltaDirNameCompacted(21, 24))) {
              sawNewDelta = true;
              FileStatus[] buckets = fs.listStatus(stat[i].getPath());
              Assert.assertEquals(2, buckets.length);
    @@ -760,9 +767,9 @@ public class TestWorker extends CompactorTest {
          Arrays.sort(stat);
          Assert.assertEquals("base_0000022", stat[0].getPath().getName());
          Assert.assertEquals("base_20", stat[1].getPath().getName());
    - Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
    - Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
    - Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(21, 22), stat[2].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
        }

        @Test
    @@ -796,9 +803,13 @@ public class TestWorker extends CompactorTest {
          Arrays.sort(stat);
          Assert.assertEquals("base_0000027", stat[0].getPath().getName());
          Assert.assertEquals("base_20", stat[1].getPath().getName());
    - Assert.assertEquals("delta_21_22", stat[2].getPath().getName());
    - Assert.assertEquals("delta_23_25", stat[3].getPath().getName());
    - Assert.assertEquals("delta_26_27", stat[4].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(21, 22), stat[2].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(23, 25), stat[3].getPath().getName());
    + Assert.assertEquals(makeDeltaDirName(26, 27), stat[4].getPath().getName());
    + }
    + @Override
    + boolean useHive130DeltaDirName() {
    + return false;
        }

        @Test

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java
    new file mode 100644
    index 0000000..3b5283a
    --- /dev/null
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestWorker2.java
    @@ -0,0 +1,16 @@
    +package org.apache.hadoop.hive.ql.txn.compactor;
    +
    +/**
    + * Same as TestWorker but tests delta file names in Hive 1.3.0 format
    + */
    +public class TestWorker2 extends TestWorker {
    +
    + public TestWorker2() throws Exception {
    + super();
    + }
    +
    + @Override
    + boolean useHive130DeltaDirName() {
    + return true;
    + }
    +}
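
    The assertions above replace literal directory names with the makeDeltaDirName/makeDeltaDirNameCompacted helpers, whose implementations are not part of this diff. As an illustration only (the padding width is inferred from assertion strings such as delta_0000021_0000024, not from the helpers themselves), the difference between the legacy names and the zero-padded 1.3.0-style names looks roughly like this:

    /** Illustrative only; not the CompactorTest helpers. */
    public class DeltaDirNameSketch {
      // Legacy layout seen in the removed assertions, e.g. delta_21_22.
      static String legacyDeltaDirName(long minTxn, long maxTxn) {
        return "delta_" + minTxn + "_" + maxTxn;
      }

      // Zero-padded layout seen in the compacted-delta assertions, e.g. delta_0000021_0000024.
      static String paddedDeltaDirName(long minTxn, long maxTxn) {
        return String.format("delta_%07d_%07d", minTxn, maxTxn);
      }

      public static void main(String[] args) {
        System.out.println(legacyDeltaDirName(21, 22));  // delta_21_22
        System.out.println(paddedDeltaDirName(21, 24));  // delta_0000021_0000024
      }
    }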
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11225: Running all Hive UTs or itests executes only a small subset of tests (Ferdinand Xu, reviewed by Hari Sankar Sivarama Subramaniyan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5c94bda9
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5c94bda9
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5c94bda9

    Branch: refs/heads/spark
    Commit: 5c94bda99399d7861ba2c83de707305655231925
    Parents: ad1cb15
    Author: Ferdinand Xu <cheng.a.xu@intel.com>
    Authored: Sun Jul 12 21:50:20 2015 -0400
    Committer: Ferdinand Xu <cheng.a.xu@intel.com>
    Committed: Sun Jul 12 21:50:20 2015 -0400

    ----------------------------------------------------------------------
      pom.xml | 7 ++++---
      1 file changed, 4 insertions(+), 3 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/5c94bda9/pom.xml
    ----------------------------------------------------------------------
    diff --git a/pom.xml b/pom.xml
    index f2cb761..1abf738 100644
    --- a/pom.xml
    +++ b/pom.xml
    @@ -826,9 +826,8 @@
                  <exclude>**/ql/exec/vector/udf/legacy/*.java</exclude>
                  <exclude>**/ql/exec/vector/udf/generic/*.java</exclude>
                  <exclude>**/TestHiveServer2Concurrency.java</exclude>
    - <exclude>**/TestHiveMetaStore.java</exclude>
                  <exclude>${test.excludes.additional}</exclude>
    - <exclude>%regex[${skip.spark.files}]</exclude>
    + <exclude>${skip.spark.files}</exclude>
                </excludes>
                <redirectTestOutputToFile>true</redirectTestOutputToFile>
                <reuseForks>false</reuseForks>
    @@ -1219,7 +1218,9 @@
       </property>
            </activation>
            <properties>
    - <skip.spark.files>.*[TestSparkSessionManagerImpl|TestMultiSessionsHS2WithLocalClusterSpark|TestJdbcWithLocalClusterSpark].class</skip.spark.files>
    + <skip.spark.files>
    + **/ql/exec/spark/session/TestSparkSessionManagerImpl.java,**/TestMultiSessionsHS2WithLocalClusterSpark.java,**/TestJdbcWithLocalClusterSpark.java
    + </skip.spark.files>
            </properties>
          </profile>
        </profiles>
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11232 : CBO: Calcite Operator To Hive Operator (Calcite Return Path): fix the output of select_same_col.q (Pengcheng Xiong via Ashutosh Chauhan)

    Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/17f759d6
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/17f759d6
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/17f759d6

    Branch: refs/heads/spark
    Commit: 17f759d6332f4f9fb87e4679c01447cd27370420
    Parents: 8121b9a
    Author: Pengcheng Xiong <pxiong@hortonworks.com>
    Authored: Mon Jul 20 02:51:00 2015 -0700
    Committer: Ashutosh Chauhan <hashutosh@apache.org>
    Committed: Mon Jul 13 09:41:55 2015 -0700

    ----------------------------------------------------------------------
      ql/src/test/queries/clientpositive/select_same_col.q | 5 +++--
      ql/src/test/results/clientpositive/select_same_col.q.out | 8 ++++++--
      2 files changed, 9 insertions(+), 4 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/17f759d6/ql/src/test/queries/clientpositive/select_same_col.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/select_same_col.q b/ql/src/test/queries/clientpositive/select_same_col.q
    index d6902c2..21f0d45 100644
    --- a/ql/src/test/queries/clientpositive/select_same_col.q
    +++ b/ql/src/test/queries/clientpositive/select_same_col.q
    @@ -1,6 +1,7 @@
    -
      set hive.cbo.enable=true;

    +-- SORT_BEFORE_DIFF
    +
      drop table srclimit;
      create table srclimit as select * from src limit 10;

    @@ -16,4 +17,4 @@ select *, key, value from srclimit;

      select * from (select *, key, value from srclimit) t;

    -drop table srclimit;
    \ No newline at end of file
    +drop table srclimit;

    http://git-wip-us.apache.org/repos/asf/hive/blob/17f759d6/ql/src/test/results/clientpositive/select_same_col.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/select_same_col.q.out b/ql/src/test/results/clientpositive/select_same_col.q.out
    index 426f716..f7362f0 100644
    --- a/ql/src/test/results/clientpositive/select_same_col.q.out
    +++ b/ql/src/test/results/clientpositive/select_same_col.q.out
    @@ -1,6 +1,10 @@
    -PREHOOK: query: drop table srclimit
    +PREHOOK: query: -- SORT_BEFORE_DIFF
    +
    +drop table srclimit
      PREHOOK: type: DROPTABLE
    -POSTHOOK: query: drop table srclimit
    +POSTHOOK: query: -- SORT_BEFORE_DIFF
    +
    +drop table srclimit
      POSTHOOK: type: DROPTABLE
      PREHOOK: query: create table srclimit as select * from src limit 10
      PREHOOK: type: CREATETABLE_AS_SELECT
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11194 - Exchange partition on external tables should fail with an error message when the target folder already exists (Aihua Xu, reviewed by Chao Sun)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8121b9ab
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8121b9ab
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8121b9ab

    Branch: refs/heads/spark
    Commit: 8121b9ab644cbe477df477827dd82a9859a7791b
    Parents: 65e9fcf
    Author: Chao Sun <sunchao@apache.org>
    Authored: Mon Jul 13 09:36:22 2015 -0700
    Committer: Chao Sun <sunchao@apache.org>
    Committed: Mon Jul 13 09:36:22 2015 -0700

    ----------------------------------------------------------------------
      .../apache/hadoop/hive/common/FileUtils.java | 9 +++-
      .../hadoop/hive/metastore/HiveMetaStore.java | 2 +-
      .../queries/clientnegative/exchange_partition.q | 19 +++++++
      .../clientnegative/exchange_partition.q.out | 54 ++++++++++++++++++++
      4 files changed, 82 insertions(+), 2 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    index c2c54bc..7e4f386 100644
    --- a/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    +++ b/common/src/java/org/apache/hadoop/hive/common/FileUtils.java
    @@ -30,7 +30,6 @@ import java.util.List;
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.conf.Configuration;
    -import org.apache.hadoop.fs.DefaultFileAccess;
      import org.apache.hadoop.fs.FileStatus;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.FileUtil;
    @@ -635,6 +634,14 @@ public final class FileUtils {
                                     Path destPath, boolean inheritPerms,
                                     Configuration conf) throws IOException {
          LOG.info("Renaming " + sourcePath + " to " + destPath);
    +
    + // If destPath directory exists, rename call will move the sourcePath
    + // into destPath without failing. So check it before renaming.
    + if (fs.exists(destPath)) {
    + throw new IOException("Cannot rename the source path. The destination "
    + + "path already exists.");
    + }
    +
          if (!inheritPerms) {
            //just rename the directory
            return fs.rename(sourcePath, destPath);
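
    The comment in the hunk above names the subtlety being guarded against: HDFS FileSystem.rename() does not fail when the destination directory already exists, it moves the source into it. A minimal standalone sketch of the same guard follows, with a hypothetical class name and paths (only fs.exists and fs.rename are real Hadoop calls), assuming the local filesystem for the demo:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Hypothetical helper; mirrors the pre-rename existence check added to FileUtils. */
    public class RenameGuardSketch {
      static boolean renameStrict(FileSystem fs, Path source, Path dest) throws IOException {
        if (fs.exists(dest)) {
          // Same condition as the patch: refuse to rename onto an existing path.
          throw new IOException("Cannot rename the source path. The destination path already exists.");
        }
        return fs.rename(source, dest);
      }

      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration()); // local FS unless configured otherwise
        Path source = new Path("/tmp/ex_table2/part=part1");  // hypothetical paths
        Path dest = new Path("/tmp/ex_table1/part=part1");
        System.out.println(renameStrict(fs, source, dest));
      }
    }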

    http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    index 4c9cd79..920e762 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    @@ -2581,7 +2581,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {
              pathCreated = wh.renameDir(sourcePath, destPath);
              success = ms.commitTransaction();
            } finally {
    - if (!success) {
    + if (!success || !pathCreated) {
                ms.rollbackTransaction();
                if (pathCreated) {
                  wh.renameDir(destPath, sourcePath);

    http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/ql/src/test/queries/clientnegative/exchange_partition.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientnegative/exchange_partition.q b/ql/src/test/queries/clientnegative/exchange_partition.q
    new file mode 100644
    index 0000000..7dc4f57
    --- /dev/null
    +++ b/ql/src/test/queries/clientnegative/exchange_partition.q
    @@ -0,0 +1,19 @@
    +dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ex_table1;
    +dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ex_table1/part=part1;
    +CREATE EXTERNAL TABLE ex_table1 ( key INT, value STRING)
    + PARTITIONED BY (part STRING)
    + STORED AS textfile
    + LOCATION 'file:${system:test.tmp.dir}/ex_table1';
    +
    +dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/ex_table2;
    +
    +CREATE EXTERNAL TABLE ex_table2 ( key INT, value STRING)
    + PARTITIONED BY (part STRING)
    + STORED AS textfile
    + LOCATION 'file:${system:test.tmp.dir}/ex_table2';
    +
    +INSERT OVERWRITE TABLE ex_table2 PARTITION (part='part1')
    +SELECT key, value FROM src WHERE key < 10;
    +SHOW PARTITIONS ex_table2;
    +
    +ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TABLE ex_table2;

    http://git-wip-us.apache.org/repos/asf/hive/blob/8121b9ab/ql/src/test/results/clientnegative/exchange_partition.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/exchange_partition.q.out b/ql/src/test/results/clientnegative/exchange_partition.q.out
    new file mode 100644
    index 0000000..b81fb99
    --- /dev/null
    +++ b/ql/src/test/results/clientnegative/exchange_partition.q.out
    @@ -0,0 +1,54 @@
    +PREHOOK: query: CREATE EXTERNAL TABLE ex_table1 ( key INT, value STRING)
    + PARTITIONED BY (part STRING)
    + STORED AS textfile
    +#### A masked pattern was here ####
    +PREHOOK: type: CREATETABLE
    +#### A masked pattern was here ####
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@ex_table1
    +POSTHOOK: query: CREATE EXTERNAL TABLE ex_table1 ( key INT, value STRING)
    + PARTITIONED BY (part STRING)
    + STORED AS textfile
    +#### A masked pattern was here ####
    +POSTHOOK: type: CREATETABLE
    +#### A masked pattern was here ####
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@ex_table1
    +PREHOOK: query: CREATE EXTERNAL TABLE ex_table2 ( key INT, value STRING)
    + PARTITIONED BY (part STRING)
    + STORED AS textfile
    +#### A masked pattern was here ####
    +PREHOOK: type: CREATETABLE
    +#### A masked pattern was here ####
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@ex_table2
    +POSTHOOK: query: CREATE EXTERNAL TABLE ex_table2 ( key INT, value STRING)
    + PARTITIONED BY (part STRING)
    + STORED AS textfile
    +#### A masked pattern was here ####
    +POSTHOOK: type: CREATETABLE
    +#### A masked pattern was here ####
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@ex_table2
    +PREHOOK: query: INSERT OVERWRITE TABLE ex_table2 PARTITION (part='part1')
    +SELECT key, value FROM src WHERE key < 10
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@ex_table2@part=part1
    +POSTHOOK: query: INSERT OVERWRITE TABLE ex_table2 PARTITION (part='part1')
    +SELECT key, value FROM src WHERE key < 10
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@ex_table2@part=part1
    +POSTHOOK: Lineage: ex_table2 PARTITION(part=part1).key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
    +POSTHOOK: Lineage: ex_table2 PARTITION(part=part1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    +PREHOOK: query: SHOW PARTITIONS ex_table2
    +PREHOOK: type: SHOWPARTITIONS
    +PREHOOK: Input: default@ex_table2
    +POSTHOOK: query: SHOW PARTITIONS ex_table2
    +POSTHOOK: type: SHOWPARTITIONS
    +POSTHOOK: Input: default@ex_table2
    +part=part1
    +PREHOOK: query: ALTER TABLE ex_table1 EXCHANGE PARTITION (part='part1') WITH TABLE ex_table2
    +PREHOOK: type: null
    +FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. MetaException(message:Got exception: java.io.IOException Cannot rename the source path. The destination path already exists.)
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11228 - Mutation API should use semi-shared locks. (Elliot West, via Eugene Koifman)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3301b92b
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3301b92b
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3301b92b

    Branch: refs/heads/spark
    Commit: 3301b92bcb2a1f779e76d174cd9ac6d83fc66938
    Parents: 17f759d
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Mon Jul 13 09:42:07 2015 -0700
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Mon Jul 13 09:42:26 2015 -0700

    ----------------------------------------------------------------------
      .../streaming/mutate/client/MutatorClient.java | 11 +-
      .../streaming/mutate/client/lock/Lock.java | 73 +++++++----
      .../hive/hcatalog/streaming/mutate/package.html | 8 +-
      .../streaming/mutate/client/lock/TestLock.java | 121 ++++++++++++-------
      4 files changed, 136 insertions(+), 77 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
    ----------------------------------------------------------------------
    diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
    index 2724525..29b828d 100644
    --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
    +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/MutatorClient.java
    @@ -42,7 +42,16 @@ public class MutatorClient implements Closeable {
              .lockFailureListener(lockFailureListener == null ? LockFailureListener.NULL_LISTENER : lockFailureListener)
              .user(user);
          for (AcidTable table : tables) {
    - lockOptions.addTable(table.getDatabaseName(), table.getTableName());
    + switch (table.getTableType()) {
    + case SOURCE:
    + lockOptions.addSourceTable(table.getDatabaseName(), table.getTableName());
    + break;
    + case SINK:
    + lockOptions.addSinkTable(table.getDatabaseName(), table.getTableName());
    + break;
    + default:
    + throw new IllegalArgumentException("Unknown TableType: " + table.getTableType());
    + }
          }
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
    ----------------------------------------------------------------------
    diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
    index 21604df..ad0b303 100644
    --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
    +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/Lock.java
    @@ -2,6 +2,7 @@ package org.apache.hive.hcatalog.streaming.mutate.client.lock;

      import java.util.ArrayList;
      import java.util.Collection;
    +import java.util.HashSet;
      import java.util.LinkedHashSet;
      import java.util.List;
      import java.util.Set;
    @@ -35,7 +36,8 @@ public class Lock {
        private final IMetaStoreClient metaStoreClient;
        private final HeartbeatFactory heartbeatFactory;
        private final LockFailureListener listener;
    - private final Collection<Table> tableDescriptors;
    + private final Collection<Table> sinks;
    + private final Collection<Table> tables = new HashSet<>();
        private final int lockRetries;
        private final int retryWaitSeconds;
        private final String user;
    @@ -46,23 +48,26 @@ public class Lock {
        private Long transactionId;

        public Lock(IMetaStoreClient metaStoreClient, Options options) {
    - this(metaStoreClient, new HeartbeatFactory(), options.hiveConf, options.listener, options.user,
    - options.descriptors, options.lockRetries, options.retryWaitSeconds);
    + this(metaStoreClient, new HeartbeatFactory(), options.hiveConf, options.listener, options.user, options.sources,
    + options.sinks, options.lockRetries, options.retryWaitSeconds);
        }

        /** Visible for testing only. */
        Lock(IMetaStoreClient metaStoreClient, HeartbeatFactory heartbeatFactory, HiveConf hiveConf,
    - LockFailureListener listener, String user, Collection<Table> tableDescriptors, int lockRetries,
    + LockFailureListener listener, String user, Collection<Table> sources, Collection<Table> sinks, int lockRetries,
            int retryWaitSeconds) {
          this.metaStoreClient = metaStoreClient;
          this.heartbeatFactory = heartbeatFactory;
          this.hiveConf = hiveConf;
          this.user = user;
    - this.tableDescriptors = tableDescriptors;
          this.listener = listener;
          this.lockRetries = lockRetries;
          this.retryWaitSeconds = retryWaitSeconds;

    + this.sinks = sinks;
    + tables.addAll(sources);
    + tables.addAll(sinks);
    +
          if (LockFailureListener.NULL_LISTENER.equals(listener)) {
            LOG.warn("No {} supplied. Data quality and availability cannot be assured.",
                LockFailureListener.class.getSimpleName());
    @@ -77,6 +82,9 @@ public class Lock {

        /** Attempts to acquire a read lock on the table, returns if successful, throws exception otherwise. */
        public void acquire(long transactionId) throws LockException {
    + if (transactionId <= 0) {
    + throw new IllegalArgumentException("Invalid transaction id: " + transactionId);
    + }
          lockId = internalAcquire(transactionId);
          this.transactionId = transactionId;
          initiateHeartbeat();
    @@ -96,19 +104,18 @@ public class Lock {

        @Override
        public String toString() {
    - return "Lock [metaStoreClient=" + metaStoreClient + ", lockId=" + lockId + ", transactionId=" + transactionId
    - + "]";
    + return "Lock [metaStoreClient=" + metaStoreClient + ", lockId=" + lockId + ", transactionId=" + transactionId + "]";
        }

        private long internalAcquire(Long transactionId) throws LockException {
          int attempts = 0;
    - LockRequest request = buildSharedLockRequest(transactionId);
    + LockRequest request = buildLockRequest(transactionId);
          do {
            LockResponse response = null;
            try {
              response = metaStoreClient.lock(request);
            } catch (TException e) {
    - throw new LockException("Unable to acquire lock for tables: [" + join(tableDescriptors) + "]", e);
    + throw new LockException("Unable to acquire lock for tables: [" + join(tables) + "]", e);
            }
            if (response != null) {
              LockState state = response.getState();
    @@ -129,7 +136,7 @@ public class Lock {
            }
            attempts++;
          } while (attempts < lockRetries);
    - throw new LockException("Could not acquire lock on tables: [" + join(tableDescriptors) + "]");
    + throw new LockException("Could not acquire lock on tables: [" + join(tables) + "]");
        }

        private void internalRelease() {
    @@ -142,18 +149,24 @@ public class Lock {
            }
          } catch (TException e) {
            LOG.error("Lock " + lockId + " failed.", e);
    - listener.lockFailed(lockId, transactionId, asStrings(tableDescriptors), e);
    + listener.lockFailed(lockId, transactionId, asStrings(tables), e);
          }
        }

    - private LockRequest buildSharedLockRequest(Long transactionId) {
    + private LockRequest buildLockRequest(Long transactionId) {
    + if (transactionId == null && !sinks.isEmpty()) {
    + throw new IllegalArgumentException("Cannot sink to tables outside of a transaction: sinks=" + asStrings(sinks));
    + }
          LockRequestBuilder requestBuilder = new LockRequestBuilder();
    - for (Table descriptor : tableDescriptors) {
    - LockComponent component = new LockComponentBuilder()
    - .setDbName(descriptor.getDbName())
    - .setTableName(descriptor.getTableName())
    - .setShared()
    - .build();
    + for (Table table : tables) {
    + LockComponentBuilder componentBuilder = new LockComponentBuilder().setDbName(table.getDbName()).setTableName(
    + table.getTableName());
    + if (sinks.contains(table)) {
    + componentBuilder.setSemiShared();
    + } else {
    + componentBuilder.setShared();
    + }
    + LockComponent component = componentBuilder.build();
            requestBuilder.addLockComponent(component);
          }
          if (transactionId != null) {
    @@ -166,8 +179,7 @@ public class Lock {
        private void initiateHeartbeat() {
          int heartbeatPeriod = getHeartbeatPeriod();
          LOG.debug("Heartbeat period {}s", heartbeatPeriod);
    - heartbeat = heartbeatFactory.newInstance(metaStoreClient, listener, transactionId, tableDescriptors, lockId,
    - heartbeatPeriod);
    + heartbeat = heartbeatFactory.newInstance(metaStoreClient, listener, transactionId, tables, lockId, heartbeatPeriod);
        }

        private int getHeartbeatPeriod() {
    @@ -210,22 +222,33 @@ public class Lock {

        /** Constructs a lock options for a set of Hive ACID tables from which we wish to read. */
        public static final class Options {
    - Set<Table> descriptors = new LinkedHashSet<>();
    + Set<Table> sources = new LinkedHashSet<>();
    + Set<Table> sinks = new LinkedHashSet<>();
          LockFailureListener listener = LockFailureListener.NULL_LISTENER;
          int lockRetries = 5;
          int retryWaitSeconds = 30;
          String user;
          HiveConf hiveConf;

    - /** Adds a table for which a shared read lock will be requested. */
    - public Options addTable(String databaseName, String tableName) {
    + /** Adds a table for which a shared lock will be requested. */
    + public Options addSourceTable(String databaseName, String tableName) {
    + addTable(databaseName, tableName, sources);
    + return this;
    + }
    +
    + /** Adds a table for which a semi-shared lock will be requested. */
    + public Options addSinkTable(String databaseName, String tableName) {
    + addTable(databaseName, tableName, sinks);
    + return this;
    + }
    +
    + private void addTable(String databaseName, String tableName, Set<Table> tables) {
            checkNotNullOrEmpty(databaseName);
            checkNotNullOrEmpty(tableName);
            Table table = new Table();
            table.setDbName(databaseName);
            table.setTableName(tableName);
    - descriptors.add(table);
    - return this;
    + tables.add(table);
          }

          public Options user(String user) {
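
    To make the source/sink distinction concrete without the metastore client, here is a standalone illustration (not Hive code) of the choice buildLockRequest() now makes: any table registered as a sink is requested semi-shared (SHARED_WRITE in the tests below), while pure sources keep the shared read lock.

    import java.util.LinkedHashSet;
    import java.util.Set;

    /** Illustrative only; enum and method names here are not from the patch. */
    public class LockTypeSketch {
      enum LockType { SHARED_READ, SHARED_WRITE }

      static LockType lockTypeFor(String table, Set<String> sinks) {
        // Mirrors buildLockRequest(): sinks.contains(table) -> setSemiShared(), else setShared().
        return sinks.contains(table) ? LockType.SHARED_WRITE : LockType.SHARED_READ;
      }

      public static void main(String[] args) {
        Set<String> sources = new LinkedHashSet<>();
        sources.add("db.source_1");
        sources.add("db.source_2");
        Set<String> sinks = new LinkedHashSet<>();
        sinks.add("db.sink");

        Set<String> all = new LinkedHashSet<>(sources);
        all.addAll(sinks);
        for (String table : all) {
          System.out.println(table + " -> " + lockTypeFor(table, sinks));
        }
      }
    }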

    http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
    ----------------------------------------------------------------------
    diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
    index 9fc10b6..09a55b6 100644
    --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
    +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/package.html
    @@ -421,7 +421,7 @@ automatically (say on a hourly basis). In such cases requiring the Hive
      admin to pre-create the necessary partitions may not be reasonable.
      Consequently the API allows coordinators to create partitions as needed
      (see:
    -<code>MutatorClientBuilder.addTable(String, String, boolean)</code>
    +<code>MutatorClientBuilder.addSinkTable(String, String, boolean)</code>
      ). Partition creation being an atomic action, multiple coordinators can
      race to create the partition, but only one would succeed, so
      coordinators clients need not synchronize when creating a partition. The
    @@ -440,14 +440,14 @@ consistent manner requires the following:
      <ol>
      <li>Obtaining a valid transaction list from the meta store (<code>ValidTxnList</code>).
      </li>
    -<li>Acquiring a read-lock with the meta store and issuing
    -heartbeats (<code>LockImpl</code> can help with this).
    +<li>Acquiring a lock with the meta store and issuing heartbeats (<code>LockImpl</code>
    +can help with this).
      </li>
      <li>Configuring the <code>OrcInputFormat</code> and then reading
      the data. Make sure that you also pull in the <code>ROW__ID</code>
      values. See: <code>AcidRecordReader.getRecordIdentifier</code>.
      </li>
    -<li>Releasing the read-lock.</li>
    +<li>Releasing the lock.</li>
      </ol>
      </p>


    http://git-wip-us.apache.org/repos/asf/hive/blob/3301b92b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
    ----------------------------------------------------------------------
    diff --git a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
    index ef1e80c..05f342b 100644
    --- a/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
    +++ b/hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
    @@ -19,7 +19,9 @@ import static org.mockito.Mockito.when;

      import java.net.InetAddress;
      import java.util.Collection;
    +import java.util.Collections;
      import java.util.List;
    +import java.util.Set;
      import java.util.Timer;

      import org.apache.hadoop.hive.conf.HiveConf;
    @@ -42,14 +44,17 @@ import org.mockito.Captor;
      import org.mockito.Mock;
      import org.mockito.runners.MockitoJUnitRunner;

    -import com.google.common.collect.ImmutableList;
    +import com.google.common.collect.ImmutableSet;

      @RunWith(MockitoJUnitRunner.class)
      public class TestLock {

    - private static final Table TABLE_1 = createTable("DB", "ONE");
    - private static final Table TABLE_2 = createTable("DB", "TWO");
    - private static final List<Table> TABLES = ImmutableList.of(TABLE_1, TABLE_2);
    + private static final Table SOURCE_TABLE_1 = createTable("DB", "SOURCE_1");
    + private static final Table SOURCE_TABLE_2 = createTable("DB", "SOURCE_2");
    + private static final Table SINK_TABLE = createTable("DB", "SINK");
    + private static final Set<Table> SOURCES = ImmutableSet.of(SOURCE_TABLE_1, SOURCE_TABLE_2);
    + private static final Set<Table> SINKS = ImmutableSet.of(SINK_TABLE);
    + private static final Set<Table> TABLES = ImmutableSet.of(SOURCE_TABLE_1, SOURCE_TABLE_2, SINK_TABLE);
        private static final long LOCK_ID = 42;
        private static final long TRANSACTION_ID = 109;
        private static final String USER = "ewest";
    @@ -67,7 +72,8 @@ public class TestLock {
        @Captor
        private ArgumentCaptor<LockRequest> requestCaptor;

    - private Lock lock;
    + private Lock readLock;
    + private Lock writeLock;
        private HiveConf configuration = new HiveConf();

        @Before
    @@ -79,44 +85,57 @@ public class TestLock {
              mockHeartbeatFactory.newInstance(any(IMetaStoreClient.class), any(LockFailureListener.class), any(Long.class),
                  any(Collection.class), anyLong(), anyInt())).thenReturn(mockHeartbeat);

    - lock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, TABLES, 3, 0);
    + readLock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, SOURCES,
    + Collections.<Table> emptySet(), 3, 0);
    + writeLock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, SOURCES, SINKS,
    + 3, 0);
        }

        @Test
        public void testAcquireReadLockWithNoIssues() throws Exception {
    - lock.acquire();
    - assertEquals(Long.valueOf(LOCK_ID), lock.getLockId());
    - assertNull(lock.getTransactionId());
    + readLock.acquire();
    + assertEquals(Long.valueOf(LOCK_ID), readLock.getLockId());
    + assertNull(readLock.getTransactionId());
    + }
    +
    + @Test(expected = IllegalArgumentException.class)
    + public void testAcquireWriteLockWithoutTxn() throws Exception {
    + writeLock.acquire();
    + }
    +
    + @Test(expected = IllegalArgumentException.class)
    + public void testAcquireWriteLockWithInvalidTxn() throws Exception {
    + writeLock.acquire(0);
        }

        @Test
        public void testAcquireTxnLockWithNoIssues() throws Exception {
    - lock.acquire(TRANSACTION_ID);
    - assertEquals(Long.valueOf(LOCK_ID), lock.getLockId());
    - assertEquals(Long.valueOf(TRANSACTION_ID), lock.getTransactionId());
    + writeLock.acquire(TRANSACTION_ID);
    + assertEquals(Long.valueOf(LOCK_ID), writeLock.getLockId());
    + assertEquals(Long.valueOf(TRANSACTION_ID), writeLock.getTransactionId());
        }

        @Test
        public void testAcquireReadLockCheckHeartbeatCreated() throws Exception {
          configuration.set("hive.txn.timeout", "100s");
    - lock.acquire();
    + readLock.acquire();

    - verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), any(Long.class), eq(TABLES),
    + verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), any(Long.class), eq(SOURCES),
              eq(LOCK_ID), eq(75));
        }

        @Test
        public void testAcquireTxnLockCheckHeartbeatCreated() throws Exception {
          configuration.set("hive.txn.timeout", "100s");
    - lock.acquire(TRANSACTION_ID);
    + writeLock.acquire(TRANSACTION_ID);

    - verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), eq(TRANSACTION_ID), eq(TABLES),
    - eq(LOCK_ID), eq(75));
    + verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), eq(TRANSACTION_ID),
    + eq(TABLES), eq(LOCK_ID), eq(75));
        }

        @Test
        public void testAcquireLockCheckUser() throws Exception {
    - lock.acquire();
    + readLock.acquire();
          verify(mockMetaStoreClient).lock(requestCaptor.capture());
          LockRequest actualRequest = requestCaptor.getValue();
          assertEquals(USER, actualRequest.getUser());
    @@ -124,7 +143,7 @@ public class TestLock {

        @Test
        public void testAcquireReadLockCheckLocks() throws Exception {
    - lock.acquire();
    + readLock.acquire();
          verify(mockMetaStoreClient).lock(requestCaptor.capture());

          LockRequest request = requestCaptor.getValue();
    @@ -137,17 +156,17 @@ public class TestLock {
          assertEquals(2, components.size());

          LockComponent expected1 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    - expected1.setTablename("ONE");
    + expected1.setTablename("SOURCE_1");
          assertTrue(components.contains(expected1));

          LockComponent expected2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    - expected2.setTablename("TWO");
    + expected2.setTablename("SOURCE_2");
          assertTrue(components.contains(expected2));
        }

        @Test
        public void testAcquireTxnLockCheckLocks() throws Exception {
    - lock.acquire(TRANSACTION_ID);
    + writeLock.acquire(TRANSACTION_ID);
          verify(mockMetaStoreClient).lock(requestCaptor.capture());

          LockRequest request = requestCaptor.getValue();
    @@ -157,73 +176,77 @@ public class TestLock {

          List<LockComponent> components = request.getComponent();

    - System.out.println(components);
    - assertEquals(2, components.size());
    + assertEquals(3, components.size());

          LockComponent expected1 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    - expected1.setTablename("ONE");
    + expected1.setTablename("SOURCE_1");
          assertTrue(components.contains(expected1));

          LockComponent expected2 = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "DB");
    - expected2.setTablename("TWO");
    + expected2.setTablename("SOURCE_2");
          assertTrue(components.contains(expected2));
    +
    + LockComponent expected3 = new LockComponent(LockType.SHARED_WRITE, LockLevel.TABLE, "DB");
    + expected3.setTablename("SINK");
    + assertTrue(components.contains(expected3));
        }

        @Test(expected = LockException.class)
        public void testAcquireLockNotAcquired() throws Exception {
          when(mockLockResponse.getState()).thenReturn(NOT_ACQUIRED);
    - lock.acquire();
    + readLock.acquire();
        }

        @Test(expected = LockException.class)
        public void testAcquireLockAborted() throws Exception {
          when(mockLockResponse.getState()).thenReturn(ABORT);
    - lock.acquire();
    + readLock.acquire();
        }

        @Test(expected = LockException.class)
        public void testAcquireLockWithWaitRetriesExceeded() throws Exception {
          when(mockLockResponse.getState()).thenReturn(WAITING, WAITING, WAITING);
    - lock.acquire();
    + readLock.acquire();
        }

        @Test
        public void testAcquireLockWithWaitRetries() throws Exception {
          when(mockLockResponse.getState()).thenReturn(WAITING, WAITING, ACQUIRED);
    - lock.acquire();
    - assertEquals(Long.valueOf(LOCK_ID), lock.getLockId());
    + readLock.acquire();
    + assertEquals(Long.valueOf(LOCK_ID), readLock.getLockId());
        }

        @Test
        public void testReleaseLock() throws Exception {
    - lock.acquire();
    - lock.release();
    + readLock.acquire();
    + readLock.release();
          verify(mockMetaStoreClient).unlock(LOCK_ID);
        }

        @Test
        public void testReleaseLockNoLock() throws Exception {
    - lock.release();
    + readLock.release();
          verifyNoMoreInteractions(mockMetaStoreClient);
        }

        @Test
        public void testReleaseLockCancelsHeartbeat() throws Exception {
    - lock.acquire();
    - lock.release();
    + readLock.acquire();
    + readLock.release();
          verify(mockHeartbeat).cancel();
        }

        @Test
        public void testReadHeartbeat() throws Exception {
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, SOURCES, LOCK_ID);
          task.run();
          verify(mockMetaStoreClient).heartbeat(0, LOCK_ID);
        }

        @Test
        public void testTxnHeartbeat() throws Exception {
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
    + LOCK_ID);
          task.run();
          verify(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
        }
    @@ -232,43 +255,47 @@ public class TestLock {
        public void testReadHeartbeatFailsNoSuchLockException() throws Exception {
          Throwable t = new NoSuchLockException();
          doThrow(t).when(mockMetaStoreClient).heartbeat(0, LOCK_ID);
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, null, SOURCES, LOCK_ID);
          task.run();
    - verify(mockListener).lockFailed(LOCK_ID, null, Lock.asStrings(TABLES), t);
    + verify(mockListener).lockFailed(LOCK_ID, null, Lock.asStrings(SOURCES), t);
        }

        @Test
        public void testTxnHeartbeatFailsNoSuchLockException() throws Exception {
          Throwable t = new NoSuchLockException();
          doThrow(t).when(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
    + LOCK_ID);
          task.run();
    - verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(TABLES), t);
    + verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(SOURCES), t);
        }

        @Test
        public void testHeartbeatFailsNoSuchTxnException() throws Exception {
          Throwable t = new NoSuchTxnException();
          doThrow(t).when(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
    + LOCK_ID);
          task.run();
    - verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(TABLES), t);
    + verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(SOURCES), t);
        }

        @Test
        public void testHeartbeatFailsTxnAbortedException() throws Exception {
          Throwable t = new TxnAbortedException();
          doThrow(t).when(mockMetaStoreClient).heartbeat(TRANSACTION_ID, LOCK_ID);
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
    + LOCK_ID);
          task.run();
    - verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(TABLES), t);
    + verify(mockListener).lockFailed(LOCK_ID, TRANSACTION_ID, Lock.asStrings(SOURCES), t);
        }

        @Test
        public void testHeartbeatContinuesTException() throws Exception {
          Throwable t = new TException();
          doThrow(t).when(mockMetaStoreClient).heartbeat(0, LOCK_ID);
    - HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, TABLES, LOCK_ID);
    + HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES,
    + LOCK_ID);
          task.run();
          verifyZeroInteractions(mockListener);
        }
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11239: Test encryption_insert_partition_static.q fails with different output results on other environments (Sergio Pena, reviewed by Ferdinand Xu)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/a65bcbdf
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/a65bcbdf
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/a65bcbdf

    Branch: refs/heads/spark
    Commit: a65bcbdf463903a5a9650693d597a4b711abea2f
    Parents: 21aecbc
    Author: Ferdinand Xu <cheng.a.xu@intel.com>
    Authored: Tue Jul 14 04:50:15 2015 -0400
    Committer: Ferdinand Xu <cheng.a.xu@intel.com>
    Committed: Tue Jul 14 04:50:15 2015 -0400

    ----------------------------------------------------------------------
      .../encryption_insert_partition_static.q | 17 -
      .../encryption_insert_partition_static.q.out | 739 +------------------
      2 files changed, 11 insertions(+), 745 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/a65bcbdf/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
    index c5769a6..69687df 100644
    --- a/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
    +++ b/ql/src/test/queries/clientpositive/encryption_insert_partition_static.q
    @@ -18,11 +18,6 @@ create table unencryptedTable(key string,
          value string) partitioned by (ds string) clustered by (key) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true');

      -- insert encrypted table from values
    -explain extended insert into table encryptedTable partition
    - (ds='today') values
    - ('501', 'val_501'),
    - ('502', 'val_502');
    -
      insert into table encryptedTable partition
          (ds='today') values
          ('501', 'val_501'),
    @@ -31,27 +26,15 @@ insert into table encryptedTable partition
      select * from encryptedTable order by key;

      -- insert encrypted table from unencrypted source
    -explain extended
    -insert into table encryptedTable partition (ds='yesterday')
    -select * from src where key in ('238', '86');
    -
      insert into table encryptedTable partition (ds='yesterday')
      select * from src where key in ('238', '86');

      select * from encryptedTable order by key;

      -- insert unencrypted table from encrypted source
    -explain extended
      insert into table unencryptedTable partition (ds='today')
      select key, value from encryptedTable where ds='today';

    -insert into table unencryptedTable partition (ds='today')
    -select key, value from encryptedTable where ds='today';
    -
    -explain extended
    -insert into table unencryptedTable partition (ds='yesterday')
    -select key, value from encryptedTable where ds='yesterday';
    -
      insert into table unencryptedTable partition (ds='yesterday')
      select key, value from encryptedTable where ds='yesterday';


    http://git-wip-us.apache.org/repos/asf/hive/blob/a65bcbdf/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
    index b10610c..c2f0ddc 100644
    --- a/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
    +++ b/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_static.q.out
    @@ -39,187 +39,23 @@ POSTHOOK: type: CREATETABLE
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@unencryptedTable
      PREHOOK: query: -- insert encrypted table from values
    -explain extended insert into table encryptedTable partition
    +insert into table encryptedTable partition
          (ds='today') values
          ('501', 'val_501'),
          ('502', 'val_502')
      PREHOOK: type: QUERY
    -POSTHOOK: query: -- insert encrypted table from values
    -explain extended insert into table encryptedTable partition
    - (ds='today') values
    - ('501', 'val_501'),
    - ('502', 'val_502')
    -POSTHOOK: type: QUERY
    -ABSTRACT SYNTAX TREE:
    -
    -TOK_QUERY
    - TOK_FROM
    - null
    - null
    - Values__Tmp__Table__1
    - TOK_INSERT
    - TOK_INSERT_INTO
    - TOK_TAB
    - TOK_TABNAME
    - encryptedTable
    - TOK_PARTSPEC
    - TOK_PARTVAL
    - ds
    - 'today'
    - TOK_SELECT
    - TOK_SELEXPR
    - TOK_ALLCOLREF
    -
    -
    -STAGE DEPENDENCIES:
    - Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    -
    -STAGE PLANS:
    - Stage: Stage-1
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - alias: values__tmp__table__1
    - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - GatherStats: false
    - Select Operator
    - expressions: tmp_values_col1 (type: string), tmp_values_col2 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - sort order:
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col0 (type: string), _col1 (type: string)
    - auto parallelism: false
    - Path -> Alias:
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
    - Path -> Partition:
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
    - Partition
    - base file name: Values__Tmp__Table__1
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - properties:
    - bucket_count -1
    - columns tmp_values_col1,tmp_values_col2
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
    - name default.values__tmp__table__1
    - serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - properties:
    - bucket_count -1
    - columns tmp_values_col1,tmp_values_col2
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/.hive-staging
    - name default.values__tmp__table__1
    - serialization.ddl struct values__tmp__table__1 { string tmp_values_col1, string tmp_values_col2}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - name: default.values__tmp__table__1
    - name: default.values__tmp__table__1
    - Truncated Path -> Alias:
    -#### A masked pattern was here ####
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 1
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
    - NumFilesPerFileSink: 1
    - Static Partition Specification: ds=today/
    - Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.encryptedtable
    - TotalFiles: 1
    - GatherStats: true
    - MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - partition:
    - ds today
    - replace: false
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.encryptedtable
    -
    - Stage: Stage-2
    - Stats-Aggr Operator
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=today/.hive-staging
    -
    -PREHOOK: query: insert into table encryptedTable partition
    - (ds='today') values
    - ('501', 'val_501'),
    - ('502', 'val_502')
    -PREHOOK: type: QUERY
    -PREHOOK: Input: default@values__tmp__table__2
    +PREHOOK: Input: default@values__tmp__table__1
      PREHOOK: Output: default@encryptedtable@ds=today
    -POSTHOOK: query: insert into table encryptedTable partition
    +POSTHOOK: query: -- insert encrypted table from values
    +insert into table encryptedTable partition
          (ds='today') values
          ('501', 'val_501'),
          ('502', 'val_502')
      POSTHOOK: type: QUERY
    -POSTHOOK: Input: default@values__tmp__table__2
    +POSTHOOK: Input: default@values__tmp__table__1
      POSTHOOK: Output: default@encryptedtable@ds=today
    -POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).key SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    -POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).key SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: encryptedtable PARTITION(ds=today).value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
      PREHOOK: query: select * from encryptedTable order by key
      PREHOOK: type: QUERY
      PREHOOK: Input: default@encryptedtable
    @@ -233,196 +69,13 @@ POSTHOOK: Input: default@encryptedtable@ds=today
      501 val_501 today
      502 val_502 today
      PREHOOK: query: -- insert encrypted table from unencrypted source
    -explain extended
      insert into table encryptedTable partition (ds='yesterday')
      select * from src where key in ('238', '86')
      PREHOOK: type: QUERY
    -POSTHOOK: query: -- insert encrypted table from unencrypted source
    -explain extended
    -insert into table encryptedTable partition (ds='yesterday')
    -select * from src where key in ('238', '86')
    -POSTHOOK: type: QUERY
    -ABSTRACT SYNTAX TREE:
    -
    -TOK_QUERY
    - TOK_FROM
    - TOK_TABREF
    - TOK_TABNAME
    - src
    - TOK_INSERT
    - TOK_INSERT_INTO
    - TOK_TAB
    - TOK_TABNAME
    - encryptedTable
    - TOK_PARTSPEC
    - TOK_PARTVAL
    - ds
    - 'yesterday'
    - TOK_SELECT
    - TOK_SELEXPR
    - TOK_ALLCOLREF
    - TOK_WHERE
    - TOK_FUNCTION
    - in
    - TOK_TABLE_OR_COL
    - key
    - '238'
    - '86'
    -
    -
    -STAGE DEPENDENCIES:
    - Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    -
    -STAGE PLANS:
    - Stage: Stage-1
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - alias: src
    - Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE
    - GatherStats: false
    - Filter Operator
    - isSamplingPred: false
    - predicate: (key) IN ('238', '86') (type: boolean)
    - Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - sort order:
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col0 (type: string), _col1 (type: string)
    - auto parallelism: false
    - Path -> Alias:
    -#### A masked pattern was here ####
    - Path -> Partition:
    -#### A masked pattern was here ####
    - Partition
    - base file name: src
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - properties:
    - COLUMN_STATS_ACCURATE true
    - bucket_count -1
    - columns key,value
    - columns.comments 'default','default'
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.src
    - numFiles 1
    - numRows 0
    - rawDataSize 0
    - serialization.ddl struct src { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - totalSize 5812
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - properties:
    - COLUMN_STATS_ACCURATE true
    - bucket_count -1
    - columns key,value
    - columns.comments 'default','default'
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.src
    - numFiles 1
    - numRows 0
    - rawDataSize 0
    - serialization.ddl struct src { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - totalSize 5812
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - name: default.src
    - name: default.src
    - Truncated Path -> Alias:
    - /src [src]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 1
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
    - NumFilesPerFileSink: 1
    - Static Partition Specification: ds=yesterday/
    - Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.encryptedtable
    - TotalFiles: 1
    - GatherStats: true
    - MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - partition:
    - ds yesterday
    - replace: false
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.encryptedtable
    -
    - Stage: Stage-2
    - Stats-Aggr Operator
    -#### A PARTIAL masked pattern was here #### data/warehouse/encryptedTable/ds=yesterday/.hive-staging
    -
    -PREHOOK: query: insert into table encryptedTable partition (ds='yesterday')
    -select * from src where key in ('238', '86')
    -PREHOOK: type: QUERY
      PREHOOK: Input: default@src
      PREHOOK: Output: default@encryptedtable@ds=yesterday
    -POSTHOOK: query: insert into table encryptedTable partition (ds='yesterday')
    +POSTHOOK: query: -- insert encrypted table from unencrypted source
    +insert into table encryptedTable partition (ds='yesterday')
      select * from src where key in ('238', '86')
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@src
    @@ -447,199 +100,14 @@ POSTHOOK: Input: default@encryptedtable@ds=yesterday
      502 val_502 today
      86 val_86 yesterday
      PREHOOK: query: -- insert unencrypted table from encrypted source
    -explain extended
      insert into table unencryptedTable partition (ds='today')
      select key, value from encryptedTable where ds='today'
      PREHOOK: type: QUERY
    -POSTHOOK: query: -- insert unencrypted table from encrypted source
    -explain extended
    -insert into table unencryptedTable partition (ds='today')
    -select key, value from encryptedTable where ds='today'
    -POSTHOOK: type: QUERY
    -ABSTRACT SYNTAX TREE:
    -
    -TOK_QUERY
    - TOK_FROM
    - TOK_TABREF
    - TOK_TABNAME
    - encryptedTable
    - TOK_INSERT
    - TOK_INSERT_INTO
    - TOK_TAB
    - TOK_TABNAME
    - unencryptedTable
    - TOK_PARTSPEC
    - TOK_PARTVAL
    - ds
    - 'today'
    - TOK_SELECT
    - TOK_SELEXPR
    - TOK_TABLE_OR_COL
    - key
    - TOK_SELEXPR
    - TOK_TABLE_OR_COL
    - value
    - TOK_WHERE
    - =
    - TOK_TABLE_OR_COL
    - ds
    - 'today'
    -
    -
    -STAGE DEPENDENCIES:
    - Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    -
    -STAGE PLANS:
    - Stage: Stage-1
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - alias: encryptedtable
    - Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
    - GatherStats: false
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - sort order:
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col0 (type: string), _col1 (type: string)
    - auto parallelism: false
    - Path -> Alias:
    -#### A masked pattern was here ####
    - Path -> Partition:
    -#### A masked pattern was here ####
    - Partition
    - base file name: ds=today
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - partition values:
    - ds today
    - properties:
    - COLUMN_STATS_ACCURATE true
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - numFiles 2
    - numRows 0
    - partition_columns ds
    - partition_columns.types string
    - rawDataSize 0
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - totalSize 1351
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    -
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.encryptedtable
    - name: default.encryptedtable
    - Truncated Path -> Alias:
    - /encryptedTable/ds=today [encryptedtable]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 1
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
    - NumFilesPerFileSink: 1
    - Static Partition Specification: ds=today/
    - Statistics: Num rows: 6 Data size: 1351 Basic stats: COMPLETE Column stats: NONE
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.unencryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct unencryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.unencryptedtable
    - TotalFiles: 1
    - GatherStats: true
    - MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - partition:
    - ds today
    - replace: false
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.unencryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct unencryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.unencryptedtable
    -
    - Stage: Stage-2
    - Stats-Aggr Operator
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=today/.hive-staging
    -
    -PREHOOK: query: insert into table unencryptedTable partition (ds='today')
    -select key, value from encryptedTable where ds='today'
    -PREHOOK: type: QUERY
      PREHOOK: Input: default@encryptedtable
      PREHOOK: Input: default@encryptedtable@ds=today
      PREHOOK: Output: default@unencryptedtable@ds=today
    -POSTHOOK: query: insert into table unencryptedTable partition (ds='today')
    +POSTHOOK: query: -- insert unencrypted table from encrypted source
    +insert into table unencryptedTable partition (ds='today')
      select key, value from encryptedTable where ds='today'
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@encryptedtable
    @@ -647,191 +115,6 @@ POSTHOOK: Input: default@encryptedtable@ds=today
      POSTHOOK: Output: default@unencryptedtable@ds=today
      POSTHOOK: Lineage: unencryptedtable PARTITION(ds=today).key SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:key, type:string, comment:null), ]
      POSTHOOK: Lineage: unencryptedtable PARTITION(ds=today).value SIMPLE [(encryptedtable)encryptedtable.FieldSchema(name:value, type:string, comment:null), ]
    -PREHOOK: query: explain extended
    -insert into table unencryptedTable partition (ds='yesterday')
    -select key, value from encryptedTable where ds='yesterday'
    -PREHOOK: type: QUERY
    -POSTHOOK: query: explain extended
    -insert into table unencryptedTable partition (ds='yesterday')
    -select key, value from encryptedTable where ds='yesterday'
    -POSTHOOK: type: QUERY
    -ABSTRACT SYNTAX TREE:
    -
    -TOK_QUERY
    - TOK_FROM
    - TOK_TABREF
    - TOK_TABNAME
    - encryptedTable
    - TOK_INSERT
    - TOK_INSERT_INTO
    - TOK_TAB
    - TOK_TABNAME
    - unencryptedTable
    - TOK_PARTSPEC
    - TOK_PARTVAL
    - ds
    - 'yesterday'
    - TOK_SELECT
    - TOK_SELEXPR
    - TOK_TABLE_OR_COL
    - key
    - TOK_SELEXPR
    - TOK_TABLE_OR_COL
    - value
    - TOK_WHERE
    - =
    - TOK_TABLE_OR_COL
    - ds
    - 'yesterday'
    -
    -
    -STAGE DEPENDENCIES:
    - Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    - Stage-2 depends on stages: Stage-0
    -
    -STAGE PLANS:
    - Stage: Stage-1
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - alias: encryptedtable
    - Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
    - GatherStats: false
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - sort order:
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col0 (type: string), _col1 (type: string)
    - auto parallelism: false
    - Path -> Alias:
    -#### A masked pattern was here ####
    - Path -> Partition:
    -#### A masked pattern was here ####
    - Partition
    - base file name: ds=yesterday
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - partition values:
    - ds yesterday
    - properties:
    - COLUMN_STATS_ACCURATE true
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - numFiles 2
    - numRows 0
    - partition_columns ds
    - partition_columns.types string
    - rawDataSize 0
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - totalSize 1372
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    -
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.encryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct encryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.encryptedtable
    - name: default.encryptedtable
    - Truncated Path -> Alias:
    - /encryptedTable/ds=yesterday [encryptedtable]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 1
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
    - NumFilesPerFileSink: 1
    - Static Partition Specification: ds=yesterday/
    - Statistics: Num rows: 6 Data size: 1372 Basic stats: COMPLETE Column stats: NONE
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.unencryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct unencryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.unencryptedtable
    - TotalFiles: 1
    - GatherStats: true
    - MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - partition:
    - ds yesterday
    - replace: false
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
    - table:
    - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    - properties:
    - bucket_count 2
    - bucket_field_name key
    - columns key,value
    - columns.comments
    - columns.types string:string
    -#### A masked pattern was here ####
    - name default.unencryptedtable
    - partition_columns ds
    - partition_columns.types string
    - serialization.ddl struct unencryptedtable { string key, string value}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - transactional true
    -#### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    - name: default.unencryptedtable
    -
    - Stage: Stage-2
    - Stats-Aggr Operator
    -#### A PARTIAL masked pattern was here #### data/warehouse/unencryptedtable/ds=yesterday/.hive-staging
    -
      PREHOOK: query: insert into table unencryptedTable partition (ds='yesterday')
      select key, value from encryptedTable where ds='yesterday'
      PREHOOK: type: QUERY
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11211 : Reset the fields in JoinStatsRule in StatsRulesProcFactory (Pengcheng Xiong, reviewed by Laljo John Pullokkaran)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/42326958
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/42326958
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/42326958

    Branch: refs/heads/spark
    Commit: 42326958148c2558be9c3d4dfe44c9e735704617
    Parents: 4d984bd
    Author: Hari Subramaniyan <harisankar@apache.org>
    Authored: Wed Jul 15 13:15:34 2015 -0700
    Committer: Hari Subramaniyan <harisankar@apache.org>
    Committed: Wed Jul 15 13:15:34 2015 -0700

    ----------------------------------------------------------------------
      .../stats/annotation/StatsRulesProcFactory.java | 42 ++++++++++----------
      1 file changed, 22 insertions(+), 20 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/42326958/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    index 0982059..376d42c 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java
    @@ -1013,17 +1013,14 @@ public class StatsRulesProcFactory {
         */
        public static class JoinStatsRule extends DefaultStatsRule implements NodeProcessor {

    - private boolean pkfkInferred = false;
    - private long newNumRows = 0;
    - private List<Operator<? extends OperatorDesc>> parents;
    - private CommonJoinOperator<? extends JoinDesc> jop;
    - private int numAttr = 1;

          @Override
          public Object process(Node nd, Stack<Node> stack, NodeProcessorCtx procCtx,
              Object... nodeOutputs) throws SemanticException {
    - jop = (CommonJoinOperator<? extends JoinDesc>) nd;
    - parents = jop.getParentOperators();
    + long newNumRows = 0;
    + CommonJoinOperator<? extends JoinDesc> jop = (CommonJoinOperator<? extends JoinDesc>) nd;
    + List<Operator<? extends OperatorDesc>> parents = jop.getParentOperators();
    + int numAttr = 1;
            AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx;
            HiveConf conf = aspCtx.getConf();
            boolean allStatsAvail = true;
    @@ -1062,7 +1059,7 @@ public class StatsRulesProcFactory {
                numAttr = keyExprs.size();

                // infer PK-FK relationship in single attribute join case
    - inferPKFKRelationship();
    + long inferredRowCount = inferPKFKRelationship(numAttr, parents, jop);
                // get the join keys from parent ReduceSink operators
                for (int pos = 0; pos < parents.size(); pos++) {
                  ReduceSinkOperator parent = (ReduceSinkOperator) jop.getParentOperators().get(pos);
    @@ -1149,7 +1146,7 @@ public class StatsRulesProcFactory {

                // update join statistics
                stats.setColumnStats(outColStats);
    - long newRowCount = pkfkInferred ? newNumRows : computeNewRowCount(rowCounts, denom);
    + long newRowCount = inferredRowCount !=-1 ? inferredRowCount : computeNewRowCount(rowCounts, denom);
                updateStatsForJoinType(stats, newRowCount, jop, rowCountParents);
                jop.setStatistics(stats);

    @@ -1180,7 +1177,7 @@ public class StatsRulesProcFactory {
                }

                long maxDataSize = parentSizes.get(maxRowIdx);
    - long newNumRows = StatsUtils.safeMult(StatsUtils.safeMult(maxRowCount, (numParents - 1)), joinFactor);
    + newNumRows = StatsUtils.safeMult(StatsUtils.safeMult(maxRowCount, (numParents - 1)), joinFactor);
                long newDataSize = StatsUtils.safeMult(StatsUtils.safeMult(maxDataSize, (numParents - 1)), joinFactor);
                Statistics wcStats = new Statistics();
                wcStats.setNumRows(newNumRows);
    @@ -1195,15 +1192,17 @@ public class StatsRulesProcFactory {
            return null;
          }

    - private void inferPKFKRelationship() {
    + private long inferPKFKRelationship(int numAttr, List<Operator<? extends OperatorDesc>> parents,
    + CommonJoinOperator<? extends JoinDesc> jop) {
    + long newNumRows = -1;
            if (numAttr == 1) {
              // If numAttr is 1, this means we join on one single key column.
              Map<Integer, ColStatistics> parentsWithPK = getPrimaryKeyCandidates(parents);

              // We only allow one single PK.
              if (parentsWithPK.size() != 1) {
    - LOG.debug("STATS-" + jop.toString() + ": detects multiple PK parents.");
    - return;
    + LOG.debug("STATS-" + jop.toString() + ": detects none/multiple PK parents.");
    + return newNumRows;
              }
              Integer pkPos = parentsWithPK.keySet().iterator().next();
              ColStatistics csPK = parentsWithPK.values().iterator().next();
    @@ -1215,7 +1214,7 @@ public class StatsRulesProcFactory {
              // csfKs.size() + 1 == parents.size() means we have a single PK and all
              // the rest ops are FKs.
              if (csFKs.size() + 1 == parents.size()) {
    - getSelectivity(parents, pkPos, csPK, csFKs);
    + newNumRows = getCardinality(parents, pkPos, csPK, csFKs, jop);

                // some debug information
                if (isDebugEnabled) {
    @@ -1236,16 +1235,17 @@ public class StatsRulesProcFactory {
                }
              }
            }
    + return newNumRows;
          }

          /**
    - * Get selectivity of reduce sink operators.
    + * Get cardinality of reduce sink operators.
           * @param csPK - ColStatistics for a single primary key
           * @param csFKs - ColStatistics for multiple foreign keys
           */
    - private void getSelectivity(List<Operator<? extends OperatorDesc>> ops, Integer pkPos, ColStatistics csPK,
    - Map<Integer, ColStatistics> csFKs) {
    - this.pkfkInferred = true;
    + private long getCardinality(List<Operator<? extends OperatorDesc>> ops, Integer pkPos,
    + ColStatistics csPK, Map<Integer, ColStatistics> csFKs,
    + CommonJoinOperator<? extends JoinDesc> jop) {
            double pkfkSelectivity = Double.MAX_VALUE;
            int fkInd = -1;
            // 1. We iterate through all the operators that have candidate FKs and
    @@ -1290,13 +1290,15 @@ public class StatsRulesProcFactory {
                distinctVals.add(csFK.getCountDistint());
              }
            }
    + long newNumRows;
            if (csFKs.size() == 1) {
              // there is only one FK
    - this.newNumRows = newrows;
    + newNumRows = newrows;
            } else {
              // there is more than one FK
    - this.newNumRows = this.computeNewRowCount(rowCounts, getDenominator(distinctVals));
    + newNumRows = this.computeNewRowCount(rowCounts, getDenominator(distinctVals));
            }
    + return newNumRows;
          }

          private float getSelectivitySimpleTree(Operator<? extends OperatorDesc> op) {
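    The refactoring above removes the mutable instance fields from JoinStatsRule and threads the inferred row count through parameters and a return value instead (-1 serving as the "nothing inferred" sentinel), so state can no longer leak between invocations when the same rule object is applied to several join operators during stats annotation. A minimal sketch of the before/after styles, using hypothetical names rather than Hive's actual classes:

      // Hypothetical illustration, not Hive code: why per-invocation state in a
      // shared processor object is fragile, and the local-variable style the patch adopts.
      final class StatefulRuleSketch {
        private boolean inferred = false;   // survives between calls
        private long newNumRows = 0;        // may hold a stale value from the previous node

        long process(long[] rowCounts) {
          // If infer() was never called for *this* node, the fields still carry
          // whatever the previously processed node left behind.
          return inferred ? newNumRows : multiply(rowCounts);
        }

        void infer(long rows) { inferred = true; newNumRows = rows; }

        private long multiply(long[] counts) {
          long r = 1;
          for (long c : counts) r *= c;
          return r;
        }
      }

      final class StatelessRuleSketch {
        long process(long[] rowCounts, long inferredRowCount) {
          // -1 means "nothing inferred", mirroring the sentinel used in the patch.
          return inferredRowCount != -1 ? inferredRowCount : multiply(rowCounts);
        }

        private long multiply(long[] counts) {
          long r = 1;
          for (long c : counts) r *= c;
          return r;
        }
      }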
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11216: UDF GenericUDFMapKeys throws NPE when a null map value is passed in (Yibing Shi via Chaoyu Tang, reviewed by Szehon Ho)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/854950b2
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/854950b2
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/854950b2

    Branch: refs/heads/spark
    Commit: 854950b2a0ce5b3885b2be0e7359f04b483f687c
    Parents: e74dc32
    Author: ctang <ctang.ma@gmail.com>
    Authored: Thu Jul 16 14:34:28 2015 -0400
    Committer: ctang <ctang.ma@gmail.com>
    Committed: Thu Jul 16 14:34:28 2015 -0400

    ----------------------------------------------------------------------
      .../apache/hadoop/hive/ql/udf/generic/GenericUDFMapKeys.java | 6 +++++-
      1 file changed, 5 insertions(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/854950b2/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMapKeys.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMapKeys.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMapKeys.java
    index d8afd74..d0cff08 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMapKeys.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMapKeys.java
    @@ -19,6 +19,7 @@
      package org.apache.hadoop.hive.ql.udf.generic;

      import java.util.ArrayList;
    +import java.util.Map;

      import org.apache.hadoop.hive.ql.exec.Description;
      import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
    @@ -61,7 +62,10 @@ public class GenericUDFMapKeys extends GenericUDF {
        public Object evaluate(DeferredObject[] arguments) throws HiveException {
          retArray.clear();
          Object mapObj = arguments[0].get();
    - retArray.addAll(mapOI.getMap(mapObj).keySet());
    + Map<?,?> mapVal = mapOI.getMap(mapObj);
    + if (mapVal != null) {
    + retArray.addAll(mapVal.keySet());
    + }
          return retArray;
        }
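    With the guard above, map_keys() returns an empty list instead of throwing a NullPointerException when the map argument evaluates to null. A minimal standalone sketch of the same null-guard pattern, using a hypothetical helper class rather than the Hive UDF itself:

      import java.util.ArrayList;
      import java.util.List;
      import java.util.Map;

      // Hypothetical illustration of the fix: clear the reusable result list,
      // copy the keys only when the map is non-null, and return the (possibly
      // empty) list either way.
      final class MapKeysSketch {
        private final List<Object> retArray = new ArrayList<Object>();

        List<Object> keysOf(Map<?, ?> mapVal) {
          retArray.clear();
          if (mapVal != null) {              // before the fix, mapVal.keySet() threw NPE here
            retArray.addAll(mapVal.keySet());
          }
          return retArray;                   // empty list for a null map
        }
      }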
  • Sunchao at Jul 20, 2015 at 8:12 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/5363af9a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/fouter_join_ppr.q.out b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
    new file mode 100644
    index 0000000..087edf2
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/fouter_join_ppr.q.out
    @@ -0,0 +1,1694 @@
    +PREHOOK: query: -- SORT_QUERY_RESULTS
    +
    +EXPLAIN EXTENDED
    + FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key AND b.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- SORT_QUERY_RESULTS
    +
    +EXPLAIN EXTENDED
    + FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key AND b.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_FULLOUTERJOIN
    + TOK_TABREF
    + TOK_TABNAME
    + src
    + a
    + TOK_TABREF
    + TOK_TABNAME
    + srcpart
    + b
    + AND
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + =
    + .
    + TOK_TABLE_OR_COL
    + b
    + ds
    + '2008-04-08'
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + value
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + value
    + TOK_WHERE
    + AND
    + AND
    + AND
    + >
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 10
    + <
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 20
    + >
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 15
    + <
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 25
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Select Operator
    + expressions: key (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + tag: 0
    + value expressions: _col1 (type: string)
    + auto parallelism: false
    + TableScan
    + alias: b
    + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Select Operator
    + expressions: key (type: string), value (type: string), ds (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    + tag: 1
    + value expressions: _col1 (type: string), _col2 (type: string)
    + auto parallelism: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: src
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.src
    + name: default.src
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=11
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 11
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=12
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 12
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=11
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-09
    + hr 11
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=12
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-09
    + hr 12
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    + Truncated Path -> Alias:
    + /src [$hdt$_0:a]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
    + /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:b]
    + /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:b]
    + Needs Tagging: true
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Outer Join 0 to 1
    + filter mappings:
    + 1 [0, 1]
    + filter predicates:
    + 0
    + 1 {(VALUE._col1 = '2008-04-08')}
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) and (UDFToDouble(_col2) > 15.0)) and (UDFToDouble(_col2) < 25.0)) (type: boolean)
    + Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1,_col2,_col3
    + columns.types string:string:string:string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key AND b.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@srcpart
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
    +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
    +#### A masked pattern was here ####
    +POSTHOOK: query: FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key AND b.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@srcpart
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
    +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
    +#### A masked pattern was here ####
    +17 val_17 17 val_17
    +17 val_17 17 val_17
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +19 val_19 19 val_19
    +19 val_19 19 val_19
    +PREHOOK: query: EXPLAIN EXTENDED
    + FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key AND a.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +PREHOOK: type: QUERY
    +POSTHOOK: query: EXPLAIN EXTENDED
    + FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key AND a.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_FULLOUTERJOIN
    + TOK_TABREF
    + TOK_TABNAME
    + srcpart
    + a
    + TOK_TABREF
    + TOK_TABNAME
    + src
    + b
    + AND
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + ds
    + '2008-04-08'
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + value
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + value
    + TOK_WHERE
    + AND
    + AND
    + AND
    + >
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 10
    + <
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 20
    + >
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 15
    + <
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 25
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Select Operator
    + expressions: key (type: string), value (type: string), ds (type: string)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    + tag: 0
    + value expressions: _col1 (type: string), _col2 (type: string)
    + auto parallelism: false
    + TableScan
    + alias: b
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Select Operator
    + expressions: key (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + tag: 1
    + value expressions: _col1 (type: string)
    + auto parallelism: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: src
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.src
    + name: default.src
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=11
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 11
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=12
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 12
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=11
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-09
    + hr 11
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=12
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-09
    + hr 12
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    + Truncated Path -> Alias:
    + /src [$hdt$_1:b]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:a]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:a]
    + /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:a]
    + /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:a]
    + Needs Tagging: true
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Outer Join 0 to 1
    + filter mappings:
    + 0 [1, 1]
    + filter predicates:
    + 0 {(VALUE._col1 = '2008-04-08')}
    + 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0, _col1, _col3, _col4
    + Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) and (UDFToDouble(_col3) > 15.0)) and (UDFToDouble(_col3) < 25.0)) (type: boolean)
    + Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1,_col2,_col3
    + columns.types string:string:string:string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key AND a.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@srcpart
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
    +PREHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
    +#### A masked pattern was here ####
    +POSTHOOK: query: FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key AND a.ds = '2008-04-08')
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@srcpart
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=11
    +POSTHOOK: Input: default@srcpart@ds=2008-04-09/hr=12
    +#### A masked pattern was here ####
    +17 val_17 17 val_17
    +17 val_17 17 val_17
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +19 val_19 19 val_19
    +19 val_19 19 val_19
    +PREHOOK: query: EXPLAIN EXTENDED
    + FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
    +PREHOOK: type: QUERY
    +POSTHOOK: query: EXPLAIN EXTENDED
    + FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_FULLOUTERJOIN
    + TOK_TABREF
    + TOK_TABNAME
    + src
    + a
    + TOK_TABREF
    + TOK_TABNAME
    + srcpart
    + b
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + value
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + value
    + TOK_WHERE
    + AND
    + AND
    + AND
    + AND
    + >
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 10
    + <
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 20
    + >
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 15
    + <
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 25
    + =
    + .
    + TOK_TABLE_OR_COL
    + b
    + ds
    + '2008-04-08'
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
    + Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: key (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
    + tag: 0
    + value expressions: _col1 (type: string)
    + auto parallelism: false
    + TableScan
    + alias: b
    + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((UDFToDouble(key) > 15.0) and (UDFToDouble(key) < 25.0)) (type: boolean)
    + Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: key (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
    + tag: 1
    + value expressions: _col1 (type: string)
    + auto parallelism: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: src
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.src
    + name: default.src
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=11
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 11
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=12
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 12
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    + Truncated Path -> Alias:
    + /src [$hdt$_0:a]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:b]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:b]
    + Needs Tagging: true
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Right Outer Join0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((UDFToDouble(_col0) > 10.0) and (UDFToDouble(_col0) < 20.0)) (type: boolean)
    + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1,_col2,_col3
    + columns.types string:string:string:string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@srcpart
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +#### A masked pattern was here ####
    +POSTHOOK: query: FROM
    + src a
    + FULL OUTER JOIN
    + srcpart b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND b.ds = '2008-04-08'
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@srcpart
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +#### A masked pattern was here ####
    +17 val_17 17 val_17
    +17 val_17 17 val_17
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +19 val_19 19 val_19
    +19 val_19 19 val_19
    +PREHOOK: query: EXPLAIN EXTENDED
    + FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
    +PREHOOK: type: QUERY
    +POSTHOOK: query: EXPLAIN EXTENDED
    + FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
    +POSTHOOK: type: QUERY
    +ABSTRACT SYNTAX TREE:
    +
    +TOK_QUERY
    + TOK_FROM
    + TOK_FULLOUTERJOIN
    + TOK_TABREF
    + TOK_TABNAME
    + srcpart
    + a
    + TOK_TABREF
    + TOK_TABNAME
    + src
    + b
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_INSERT
    + TOK_DESTINATION
    + TOK_DIR
    + TOK_TMP_FILE
    + TOK_SELECT
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + a
    + value
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + TOK_SELEXPR
    + .
    + TOK_TABLE_OR_COL
    + b
    + value
    + TOK_WHERE
    + AND
    + AND
    + AND
    + AND
    + >
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 10
    + <
    + .
    + TOK_TABLE_OR_COL
    + a
    + key
    + 20
    + >
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 15
    + <
    + .
    + TOK_TABLE_OR_COL
    + b
    + key
    + 25
    + =
    + .
    + TOK_TABLE_OR_COL
    + a
    + ds
    + '2008-04-08'
    +
    +
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
    + Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: key (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 111 Data size: 1179 Basic stats: COMPLETE Column stats: NONE
    + tag: 0
    + value expressions: _col1 (type: string)
    + auto parallelism: false
    + TableScan
    + alias: b
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + GatherStats: false
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((UDFToDouble(key) > 10.0) and (UDFToDouble(key) < 20.0)) (type: boolean)
    + Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: key (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 55 Data size: 584 Basic stats: COMPLETE Column stats: NONE
    + tag: 1
    + value expressions: _col1 (type: string)
    + auto parallelism: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: src
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.src
    + numFiles 1
    + numRows 500
    + rawDataSize 5312
    + serialization.ddl struct src { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.src
    + name: default.src
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=11
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 11
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    +#### A masked pattern was here ####
    + Partition
    + base file name: hr=12
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + partition values:
    + ds 2008-04-08
    + hr 12
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + numFiles 1
    + numRows 500
    + partition_columns ds/hr
    + partition_columns.types string:string
    + rawDataSize 5312
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 5812
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key,value
    + columns.comments 'default','default'
    + columns.types string:string
    +#### A masked pattern was here ####
    + name default.srcpart
    + partition_columns ds/hr
    + partition_columns.types string:string
    + serialization.ddl struct srcpart { string key, string value}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.srcpart
    + name: default.srcpart
    + Truncated Path -> Alias:
    + /src [$hdt$_1:b]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:a]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:a]
    + Needs Tagging: true
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Left Outer Join0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0, _col1, _col3, _col4
    + Statistics: Num rows: 122 Data size: 1296 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + isSamplingPred: false
    + predicate: ((UDFToDouble(_col3) > 15.0) and (UDFToDouble(_col3) < 25.0)) (type: boolean)
    + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col4 (type: string)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 13 Data size: 138 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + columns _col0,_col1,_col2,_col3
    + columns.types string:string:string:string
    + escape.delim \
    + hive.serialization.extend.additional.nesting.levels true
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@srcpart
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +PREHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +#### A masked pattern was here ####
    +POSTHOOK: query: FROM
    + srcpart a
    + FULL OUTER JOIN
    + src b
    + ON (a.key = b.key)
    + SELECT a.key, a.value, b.key, b.value
    + WHERE a.key > 10 AND a.key < 20 AND b.key > 15 AND b.key < 25 AND a.ds = '2008-04-08'
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@srcpart
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=11
    +POSTHOOK: Input: default@srcpart@ds=2008-04-08/hr=12
    +#### A masked pattern was here ####
    +17 val_17 17 val_17
    +17 val_17 17 val_17
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +18 val_18 18 val_18
    +19 val_19 19 val_19
    +19 val_19 19 val_19
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11030 - Enhance storage layer to create one delta file per write (Eugene Koifman, reviewed by Alan Gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/66feedc5
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/66feedc5
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/66feedc5

    Branch: refs/heads/spark
    Commit: 66feedc5569de959a383e0a58d9e8768bbad0e2c
    Parents: 5c94bda
    Author: Eugene Koifman <ekoifman@hortonworks.com>
    Authored: Mon Jul 13 09:11:28 2015 -0700
    Committer: Eugene Koifman <ekoifman@hortonworks.com>
    Committed: Mon Jul 13 09:11:28 2015 -0700

    ----------------------------------------------------------------------
      .../streaming/AbstractRecordWriter.java | 4 +-
      .../streaming/mutate/worker/MutatorImpl.java | 4 +-
      .../java/org/apache/hadoop/hive/ql/Driver.java | 1 +
      .../hadoop/hive/ql/io/AcidInputFormat.java | 60 +++++++-
      .../hadoop/hive/ql/io/AcidOutputFormat.java | 49 +++++-
      .../org/apache/hadoop/hive/ql/io/AcidUtils.java | 152 +++++++++++++++----
      .../hadoop/hive/ql/io/HiveFileFormatUtils.java | 19 +--
      .../hadoop/hive/ql/io/orc/OrcInputFormat.java | 20 +--
      .../hadoop/hive/ql/io/orc/OrcNewSplit.java | 13 +-
      .../hive/ql/io/orc/OrcRawRecordMerger.java | 66 ++++++--
      .../hadoop/hive/ql/io/orc/OrcRecordUpdater.java | 58 +++++++
      .../apache/hadoop/hive/ql/io/orc/OrcSplit.java | 16 +-
      .../hadoop/hive/ql/lockmgr/DbTxnManager.java | 20 ++-
      .../hadoop/hive/ql/lockmgr/DummyTxnManager.java | 4 +
      .../hadoop/hive/ql/lockmgr/HiveTxnManager.java | 3 +
      .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 3 +-
      .../hadoop/hive/ql/plan/FileSinkDesc.java | 27 +++-
      .../hive/ql/txn/compactor/CompactorMR.java | 4 +-
      .../hive/ql/exec/TestFileSinkOperator.java | 3 +-
      .../apache/hadoop/hive/ql/io/TestAcidUtils.java | 73 ++++++++-
      .../hive/ql/io/orc/TestInputOutputFormat.java | 13 +-
      .../hive/ql/io/orc/TestOrcRawRecordMerger.java | 57 ++++---
      .../hive/ql/io/orc/TestOrcRecordUpdater.java | 6 +-
      .../hive/ql/txn/compactor/CompactorTest.java | 20 ++-
      .../hive/ql/txn/compactor/TestCleaner.java | 8 +-
      .../hive/ql/txn/compactor/TestCleaner2.java | 14 ++
      .../hive/ql/txn/compactor/TestInitiator.java | 4 +
      .../hive/ql/txn/compactor/TestWorker.java | 49 +++---
      .../hive/ql/txn/compactor/TestWorker2.java | 16 ++
      29 files changed, 645 insertions(+), 141 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
    ----------------------------------------------------------------------
    diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
    index ed46bca..c959222 100644
    --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
    +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java
    @@ -143,7 +143,9 @@ abstract class AbstractRecordWriter implements RecordWriter {
                            .inspector(getSerde().getObjectInspector())
                            .bucket(bucketId)
                            .minimumTransactionId(minTxnId)
    - .maximumTransactionId(maxTxnID));
    + .maximumTransactionId(maxTxnID)
    + .statementId(-1)
    + .finalDestination(partitionPath));
          } catch (SerDeException e) {
            throw new SerializationError("Failed to get object inspector from Serde "
                    + getSerde().getClass().getName(), e);
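
    The hunk above (and the analogous MutatorImpl change below) now threads a statement id and the final destination into the writer options. A minimal sketch of that call shape, using hypothetical values and only the options visible in this patch, might look like this:

      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.ql.io.AcidOutputFormat;

      public class StreamingOptionsSketch {
        // Illustrative only: mirrors the option chain built by the streaming writers
        // after this change. statementId(-1) asks for the pre-1.3 delta directory name;
        // finalDestination is the table/partition directory the data ends up in.
        static AcidOutputFormat.Options writerOptions(Configuration conf, Path partitionPath,
            int bucketId, long txnId) {
          return new AcidOutputFormat.Options(conf)
              .bucket(bucketId)
              .minimumTransactionId(txnId)
              .maximumTransactionId(txnId)
              .statementId(-1)
              .finalDestination(partitionPath);
        }
      }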

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
    ----------------------------------------------------------------------
    diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
    index 0fe41d5..52062f8 100644
    --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
    +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/worker/MutatorImpl.java
    @@ -78,7 +78,9 @@ public class MutatorImpl implements Mutator {
                  .bucket(bucketId)
                  .minimumTransactionId(transactionId)
                  .maximumTransactionId(transactionId)
    - .recordIdColumn(recordIdColumn));
    + .recordIdColumn(recordIdColumn)
    + .finalDestination(partitionPath)
    + .statementId(-1));
        }

      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    index 934cb42..b74e5fa 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    @@ -988,6 +988,7 @@ public class Driver implements CommandProcessor {
              if (acidSinks != null) {
                for (FileSinkDesc desc : acidSinks) {
                  desc.setTransactionId(txnId);
    + desc.setStatementId(txnMgr.getStatementId());
                }
              }


    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    index e1d2395..24506b7 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java
    @@ -22,13 +22,19 @@ import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.common.ValidTxnList;
      import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    +import org.apache.hadoop.io.Writable;
      import org.apache.hadoop.io.WritableComparable;
      import org.apache.hadoop.mapred.InputFormat;
      import org.apache.hadoop.mapred.InputSplit;
      import org.apache.hadoop.mapred.RecordReader;
      import org.apache.hadoop.mapred.Reporter;

    +import java.io.DataInput;
    +import java.io.DataOutput;
      import java.io.IOException;
    +import java.util.ArrayList;
    +import java.util.Collections;
    +import java.util.List;

      /**
       * The interface required for input formats that want to support ACID
    @@ -62,7 +68,7 @@ import java.io.IOException;
       * <li>New format -
       * <pre>
       * $partition/base_$tid/$bucket
    - * delta_$tid_$tid/$bucket
    + * delta_$tid_$tid_$stid/$bucket
       * </pre></li>
       * </ul>
       * <p>
    @@ -71,6 +77,8 @@ import java.io.IOException;
       * stored sorted by the original transaction id (ascending), bucket (ascending),
       * row id (ascending), and current transaction id (descending). Thus the files
       * can be merged by advancing through the files in parallel.
     + * The stid is the unique id (within the transaction) of the statement that created
    + * this delta file.
       * <p>
       * The base files include all transactions from the beginning of time
       * (transaction id 0) to the transaction in the directory name. Delta
    @@ -91,7 +99,7 @@ import java.io.IOException;
       * For row-at-a-time processing, KEY can conveniently pass RowId into the operator
       * pipeline. For vectorized execution the KEY could perhaps represent a range in the batch.
       * Since {@link org.apache.hadoop.hive.ql.io.orc.OrcInputFormat} is declared to return
    - * {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidRecordReader} is defined
    + * {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidInputFormat.AcidRecordReader} is defined
       * to provide access to the RowId. Other implementations of AcidInputFormat can use either
       * mechanism.
       * </p>
    @@ -101,6 +109,54 @@ import java.io.IOException;
      public interface AcidInputFormat<KEY extends WritableComparable, VALUE>
          extends InputFormat<KEY, VALUE>, InputFormatChecker {

    + static final class DeltaMetaData implements Writable {
    + private long minTxnId;
    + private long maxTxnId;
    + private List<Integer> stmtIds;
    +
    + public DeltaMetaData() {
    + this(0,0,null);
    + }
    + DeltaMetaData(long minTxnId, long maxTxnId, List<Integer> stmtIds) {
    + this.minTxnId = minTxnId;
    + this.maxTxnId = maxTxnId;
    + this.stmtIds = stmtIds;
    + }
    + long getMinTxnId() {
    + return minTxnId;
    + }
    + long getMaxTxnId() {
    + return maxTxnId;
    + }
    + List<Integer> getStmtIds() {
    + return stmtIds;
    + }
    + @Override
    + public void write(DataOutput out) throws IOException {
    + out.writeLong(minTxnId);
    + out.writeLong(maxTxnId);
     + if(stmtIds == null) {
     + out.writeInt(0);
     + return;
     + }
     + out.writeInt(stmtIds.size());
    + for(Integer id : stmtIds) {
    + out.writeInt(id);
    + }
    + }
    + @Override
    + public void readFields(DataInput in) throws IOException {
    + minTxnId = in.readLong();
    + maxTxnId = in.readLong();
    + int numStatements = in.readInt();
    + if(numStatements <= 0) {
    + return;
    + }
    + stmtIds = new ArrayList<>();
    + for(int i = 0; i < numStatements; i++) {
    + stmtIds.add(in.readInt());
    + }
    + }
    + }
        /**
         * Options for controlling the record readers.
         */
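
    To make the directory layout described in the Javadoc above concrete, here is a small self-contained sketch (not Hive code) that composes the delta directory names. The delta_ prefix and the $tid/$stid fields follow the comment above; the seven- and four-digit widths follow DELTA_DIGITS and STATEMENT_DIGITS in the AcidUtils change further down.

      public class DeltaNameSketch {
        // Pre-1.3.x style: delta_<minTxn>_<maxTxn>, seven digits each.
        static String oldStyle(long minTxn, long maxTxn) {
          return "delta_" + String.format("%07d_%07d", minTxn, maxTxn);
        }
        // New style: a four-digit per-statement suffix is appended.
        static String perStatement(long minTxn, long maxTxn, int stmtId) {
          return oldStyle(minTxn, maxTxn) + "_" + String.format("%04d", stmtId);
        }
        public static void main(String[] args) {
          // Two statements writing in transaction 17 now get separate delta directories:
          System.out.println(perStatement(17, 17, 0)); // delta_0000017_0000017_0000
          System.out.println(perStatement(17, 17, 1)); // delta_0000017_0000017_0001
          // A delta produced by minor compaction keeps the old-style name:
          System.out.println(oldStyle(17, 17));        // delta_0000017_0000017
        }
      }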

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
    index 0d537e1..dd90a95 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java
    @@ -39,7 +39,7 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
        /**
         * Options to control how the files are written
         */
    - public static class Options {
    + public static class Options implements Cloneable {
          private final Configuration configuration;
          private FileSystem fs;
          private ObjectInspector inspector;
    @@ -53,7 +53,9 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
          private PrintStream dummyStream = null;
          private boolean oldStyle = false;
          private int recIdCol = -1; // Column the record identifier is in, -1 indicates no record id
    -
    + //unique within a transaction
    + private int statementId = 0;
    + private Path finalDestination;
          /**
           * Create the options object.
           * @param conf Use the given configuration
    @@ -63,6 +65,18 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
          }

          /**
    + * shallow clone
    + */
    + @Override
    + public Options clone() {
    + try {
    + return (Options)super.clone();
    + }
    + catch(CloneNotSupportedException ex) {
    + throw new RuntimeException("clone() not properly implemented: " + ex.getMessage(), ex);
    + }
    + }
    + /**
           * Use the given ObjectInspector for each record written.
           * @param inspector the inspector to use.
           * @return this
    @@ -185,6 +199,31 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
            return this;
          }

    + /**
    + * @since 1.3.0
    + * This can be set to -1 to make the system generate old style (delta_xxxx_yyyy) file names.
    + * This is primarily needed for testing to make sure 1.3 code can still read files created
     + * by older code. Also used by the Compactor.
    + */
    + public Options statementId(int id) {
    + if(id >= AcidUtils.MAX_STATEMENTS_PER_TXN) {
    + throw new RuntimeException("Too many statements for transactionId: " + maximumTransactionId);
    + }
    + if(id < -1) {
    + throw new IllegalArgumentException("Illegal statementId value: " + id);
    + }
    + this.statementId = id;
    + return this;
    + }
    + /**
    + * @param p where the data for this operation will eventually end up;
    + * basically table or partition directory in FS
    + */
    + public Options finalDestination(Path p) {
    + this.finalDestination = p;
    + return this;
    + }
    +
          public Configuration getConfiguration() {
            return configuration;
          }
    @@ -236,6 +275,12 @@ public interface AcidOutputFormat<K extends WritableComparable, V> extends HiveO
          boolean getOldStyle() {
            return oldStyle;
          }
    + public int getStatementId() {
    + return statementId;
    + }
    + public Path getFinalDestination() {
    + return finalDestination;
    + }
        }

        /**
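
    Options is now Cloneable with a shallow clone(). One plausible use, assumed here rather than shown in the patch, is deriving a per-statement copy from a shared template so that only the statement id varies:

      import org.apache.hadoop.hive.ql.io.AcidOutputFormat;

      public class PerStatementOptionsSketch {
        // Hypothetical helper, not part of the patch: clone a base Options and set the
        // statement id. statementId() rejects values below -1 or at/above
        // AcidUtils.MAX_STATEMENTS_PER_TXN, per the validation added above.
        static AcidOutputFormat.Options forStatement(AcidOutputFormat.Options base, int stmtId) {
          return base.clone().statementId(stmtId);
        }
      }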

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
    index 2214733..c7e0780 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java
    @@ -67,6 +67,15 @@ public class AcidUtils {
        };
        public static final String BUCKET_DIGITS = "%05d";
        public static final String DELTA_DIGITS = "%07d";
    + /**
    + * 10K statements per tx. Probably overkill ... since that many delta files
    + * would not be good for performance
    + */
    + public static final String STATEMENT_DIGITS = "%04d";
    + /**
    + * This must be in sync with {@link #STATEMENT_DIGITS}
    + */
    + public static final int MAX_STATEMENTS_PER_TXN = 10000;
        public static final Pattern BUCKET_DIGIT_PATTERN = Pattern.compile("[0-9]{5}$");
        public static final Pattern LEGACY_BUCKET_DIGIT_PATTERN = Pattern.compile("^[0-9]{5}");
        public static final PathFilter originalBucketFilter = new PathFilter() {
    @@ -79,7 +88,7 @@ public class AcidUtils {
        private AcidUtils() {
          // NOT USED
        }
    - private static final Log LOG = LogFactory.getLog(AcidUtils.class.getName());
    + private static final Log LOG = LogFactory.getLog(AcidUtils.class);

        private static final Pattern ORIGINAL_PATTERN =
            Pattern.compile("[0-9]+_[0-9]+");
    @@ -104,12 +113,23 @@ public class AcidUtils {
              BUCKET_PREFIX + String.format(BUCKET_DIGITS, bucket));
        }

    - private static String deltaSubdir(long min, long max) {
    + /**
     + * This is the format of the delta dir name prior to Hive 1.3.x
    + */
    + public static String deltaSubdir(long min, long max) {
          return DELTA_PREFIX + String.format(DELTA_DIGITS, min) + "_" +
              String.format(DELTA_DIGITS, max);
        }

        /**
    + * Each write statement in a transaction creates its own delta dir.
    + * @since 1.3.x
    + */
    + public static String deltaSubdir(long min, long max, int statementId) {
    + return deltaSubdir(min, max) + "_" + String.format(STATEMENT_DIGITS, statementId);
    + }
    +
    + /**
         * Create a filename for a bucket file.
         * @param directory the partition directory
         * @param options the options for writing the bucket
    @@ -124,9 +144,15 @@ public class AcidUtils {
          } else if (options.isWritingBase()) {
            subdir = BASE_PREFIX + String.format(DELTA_DIGITS,
                options.getMaximumTransactionId());
    + } else if(options.getStatementId() == -1) {
    + //when minor compaction runs, we collapse per statement delta files inside a single
    + //transaction so we no longer need a statementId in the file name
    + subdir = deltaSubdir(options.getMinimumTransactionId(),
    + options.getMaximumTransactionId());
          } else {
            subdir = deltaSubdir(options.getMinimumTransactionId(),
    - options.getMaximumTransactionId());
    + options.getMaximumTransactionId(),
    + options.getStatementId());
          }
          return createBucketFile(new Path(directory, subdir), options.getBucket());
        }
    @@ -214,14 +240,24 @@ public class AcidUtils {
        }

        public static class ParsedDelta implements Comparable<ParsedDelta> {
    - final long minTransaction;
    - final long maxTransaction;
    - final FileStatus path;
    + private final long minTransaction;
    + private final long maxTransaction;
    + private final FileStatus path;
    + //-1 is for internal (getAcidState()) purposes and means the delta dir
    + //had no statement ID
    + private final int statementId;

    + /**
    + * for pre 1.3.x delta files
    + */
          ParsedDelta(long min, long max, FileStatus path) {
    + this(min, max, path, -1);
    + }
    + ParsedDelta(long min, long max, FileStatus path, int statementId) {
            this.minTransaction = min;
            this.maxTransaction = max;
            this.path = path;
    + this.statementId = statementId;
          }

          public long getMinTransaction() {
    @@ -236,6 +272,16 @@ public class AcidUtils {
            return path.getPath();
          }

    + public int getStatementId() {
    + return statementId == -1 ? 0 : statementId;
    + }
    +
    + /**
     + * Compactions (Major/Minor) merge deltas/bases but deletion of old files
    + * happens in a different process; thus it's possible to have bases/deltas with
    + * overlapping txnId boundaries. The sort order helps figure out the "best" set of files
    + * to use to get data.
    + */
          @Override
          public int compareTo(ParsedDelta parsedDelta) {
            if (minTransaction != parsedDelta.minTransaction) {
    @@ -250,7 +296,22 @@ public class AcidUtils {
              } else {
                return -1;
              }
    - } else {
    + }
    + else if(statementId != parsedDelta.statementId) {
    + /**
    + * We want deltas after minor compaction (w/o statementId) to sort
    + * earlier so that getAcidState() considers compacted files (into larger ones) obsolete
    + * Before compaction, include deltas with all statementIds for a given txnId
    + * in a {@link org.apache.hadoop.hive.ql.io.AcidUtils.Directory}
    + */
    + if(statementId < parsedDelta.statementId) {
    + return -1;
    + }
    + else {
    + return 1;
    + }
    + }
    + else {
              return path.compareTo(parsedDelta.path);
            }
          }
    @@ -271,46 +332,72 @@ public class AcidUtils {

        /**
         * Convert the list of deltas into an equivalent list of begin/end
    - * transaction id pairs.
    + * transaction id pairs. Assumes {@code deltas} is sorted.
         * @param deltas
         * @return the list of transaction ids to serialize
         */
    - public static List<Long> serializeDeltas(List<ParsedDelta> deltas) {
    - List<Long> result = new ArrayList<Long>(deltas.size() * 2);
    - for(ParsedDelta delta: deltas) {
    - result.add(delta.minTransaction);
    - result.add(delta.maxTransaction);
    + public static List<AcidInputFormat.DeltaMetaData> serializeDeltas(List<ParsedDelta> deltas) {
    + List<AcidInputFormat.DeltaMetaData> result = new ArrayList<>(deltas.size());
    + AcidInputFormat.DeltaMetaData last = null;
    + for(ParsedDelta parsedDelta : deltas) {
    + if(last != null && last.getMinTxnId() == parsedDelta.getMinTransaction() && last.getMaxTxnId() == parsedDelta.getMaxTransaction()) {
    + last.getStmtIds().add(parsedDelta.getStatementId());
    + continue;
    + }
    + last = new AcidInputFormat.DeltaMetaData(parsedDelta.getMinTransaction(), parsedDelta.getMaxTransaction(), new ArrayList<Integer>());
    + result.add(last);
    + if(parsedDelta.statementId >= 0) {
    + last.getStmtIds().add(parsedDelta.getStatementId());
    + }
          }
          return result;
        }

        /**
         * Convert the list of begin/end transaction id pairs to a list of delta
    - * directories.
    + * directories. Note that there may be multiple delta files for the exact same txn range starting
    + * with 1.3.x;
    + * see {@link org.apache.hadoop.hive.ql.io.AcidUtils#deltaSubdir(long, long, int)}
         * @param root the root directory
         * @param deltas list of begin/end transaction id pairs
         * @return the list of delta paths
         */
    - public static Path[] deserializeDeltas(Path root, List<Long> deltas) {
    - int deltaSize = deltas.size() / 2;
    - Path[] result = new Path[deltaSize];
    - for(int i = 0; i < deltaSize; ++i) {
    - result[i] = new Path(root, deltaSubdir(deltas.get(i * 2),
    - deltas.get(i * 2 + 1)));
    + public static Path[] deserializeDeltas(Path root, final List<AcidInputFormat.DeltaMetaData> deltas) throws IOException {
    + List<Path> results = new ArrayList<Path>(deltas.size());
    + for(AcidInputFormat.DeltaMetaData dmd : deltas) {
    + if(dmd.getStmtIds().isEmpty()) {
    + results.add(new Path(root, deltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId())));
    + continue;
    + }
    + for(Integer stmtId : dmd.getStmtIds()) {
    + results.add(new Path(root, deltaSubdir(dmd.getMinTxnId(), dmd.getMaxTxnId(), stmtId)));
    + }
          }
    - return result;
    + return results.toArray(new Path[results.size()]);
        }

    - static ParsedDelta parseDelta(FileStatus path) {
    - String filename = path.getPath().getName();
    + private static ParsedDelta parseDelta(FileStatus path) {
    + ParsedDelta p = parsedDelta(path.getPath());
    + return new ParsedDelta(p.getMinTransaction(),
    + p.getMaxTransaction(), path, p.statementId);
    + }
    + public static ParsedDelta parsedDelta(Path deltaDir) {
    + String filename = deltaDir.getName();
          if (filename.startsWith(DELTA_PREFIX)) {
            String rest = filename.substring(DELTA_PREFIX.length());
            int split = rest.indexOf('_');
    + int split2 = rest.indexOf('_', split + 1);//may be -1 if no statementId
            long min = Long.parseLong(rest.substring(0, split));
    - long max = Long.parseLong(rest.substring(split + 1));
    - return new ParsedDelta(min, max, path);
    + long max = split2 == -1 ?
    + Long.parseLong(rest.substring(split + 1)) :
    + Long.parseLong(rest.substring(split + 1, split2));
    + if(split2 == -1) {
    + return new ParsedDelta(min, max, null);
    + }
    + int statementId = Integer.parseInt(rest.substring(split2 + 1));
    + return new ParsedDelta(min, max, null, statementId);
          }
    - throw new IllegalArgumentException(path + " does not start with " +
    + throw new IllegalArgumentException(deltaDir + " does not start with " +
                                             DELTA_PREFIX);
        }

    @@ -407,15 +494,24 @@ public class AcidUtils {

          Collections.sort(working);
          long current = bestBaseTxn;
    + int lastStmtId = -1;
          for(ParsedDelta next: working) {
            if (next.maxTransaction > current) {
              // are any of the new transactions ones that we care about?
              if (txnList.isTxnRangeValid(current+1, next.maxTransaction) !=
    - ValidTxnList.RangeResponse.NONE) {
    + ValidTxnList.RangeResponse.NONE) {
                deltas.add(next);
                current = next.maxTransaction;
    + lastStmtId = next.statementId;
              }
    - } else {
    + }
    + else if(next.maxTransaction == current && lastStmtId >= 0) {
    + //make sure to get all deltas within a single transaction; multi-statement txns
    + //generate multiple delta files with the same txnId range
    + //of course, if maxTransaction has already been minor compacted, all per-statement deltas are obsolete
    + deltas.add(next);
    + }
    + else {
              obsolete.add(next.path);
            }
          }

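For readers tracing the AcidUtils changes: with multi-statement transactions each statement writes its own directory named delta_<minTxn>_<maxTxn>_<stmtId>, serializeDeltas() collapses consecutive ParsedDeltas that share a txn range into a single DeltaMetaData carrying the statement ids, and deserializeDeltas() expands them back into one path per statement. A minimal round-trip sketch, using only the DeltaMetaData constructor and deserializeDeltas() signature shown in this patch (the class name is made up for illustration):

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.AcidInputFormat;
    import org.apache.hadoop.hive.ql.io.AcidUtils;

    public class DeltaRoundTripSketch {
      public static void main(String[] args) throws Exception {
        // Txn 62 ran two statements (0 and 3); txn 63 ran a single statement (0).
        List<AcidInputFormat.DeltaMetaData> deltas = Arrays.asList(
            new AcidInputFormat.DeltaMetaData(62, 62, Arrays.asList(0, 3)),
            new AcidInputFormat.DeltaMetaData(63, 63, Arrays.asList(0)));

        // Expands back to delta_0000062_0000062_0000, delta_0000062_0000062_0003
        // and delta_0000063_0000063_0000 under the given root.
        for (Path dir : AcidUtils.deserializeDeltas(new Path("/tbl/part1"), deltas)) {
          System.out.println(dir);
        }
      }
    }
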
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    index 7ad5aa0..50ba740 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java
    @@ -297,31 +297,32 @@ public final class HiveFileFormatUtils {
          // TODO not 100% sure about this. This call doesn't set the compression type in the conf
          // file the way getHiveRecordWriter does, as ORC appears to read the value for itself. Not
          // sure if this is correct or not.
    - return getRecordUpdater(jc, acidOutputFormat, conf.getCompressed(), conf.getTransactionId(),
    - bucket, inspector, tableInfo.getProperties(), outPath, reporter, rowIdColNum);
    + return getRecordUpdater(jc, acidOutputFormat,
    + bucket, inspector, tableInfo.getProperties(), outPath, reporter, rowIdColNum, conf);
        }


        private static RecordUpdater getRecordUpdater(JobConf jc,
                                                      AcidOutputFormat<?, ?> acidOutputFormat,
    - boolean isCompressed,
    - long txnId,
                                                      int bucket,
                                                      ObjectInspector inspector,
                                                      Properties tableProp,
                                                      Path outPath,
                                                      Reporter reporter,
    - int rowIdColNum) throws IOException {
    + int rowIdColNum,
    + FileSinkDesc conf) throws IOException {
          return acidOutputFormat.getRecordUpdater(outPath, new AcidOutputFormat.Options(jc)
    - .isCompressed(isCompressed)
    + .isCompressed(conf.getCompressed())
              .tableProperties(tableProp)
              .reporter(reporter)
              .writingBase(false)
    - .minimumTransactionId(txnId)
    - .maximumTransactionId(txnId)
    + .minimumTransactionId(conf.getTransactionId())
    + .maximumTransactionId(conf.getTransactionId())
              .bucket(bucket)
              .inspector(inspector)
    - .recordIdColumn(rowIdColNum));
    + .recordIdColumn(rowIdColNum)
    + .statementId(conf.getStatementId())
    + .finalDestination(conf.getDestPath()));
        }

        public static PartitionDesc getPartitionDescFromPathRecursively(

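The point of routing the FileSinkDesc into getRecordUpdater() is that statementId and the final destination now reach AcidOutputFormat.Options alongside the txn id and compression flag. A hand-built sketch of the same chain, using only the builder methods that appear in this patch (the helper class, method and parameter names are illustrative, not part of the patch):

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
    import org.apache.hadoop.hive.ql.io.RecordUpdater;
    import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.Reporter;

    public class RecordUpdaterSketch {
      static RecordUpdater openUpdater(JobConf jc, Path deltaRoot, Path finalDest,
          ObjectInspector inspector, int bucket, long txnId, int stmtId) throws IOException {
        AcidOutputFormat<?, ?> of = new OrcOutputFormat();
        return of.getRecordUpdater(deltaRoot, new AcidOutputFormat.Options(jc)
            .writingBase(false)
            .minimumTransactionId(txnId)
            .maximumTransactionId(txnId)
            .bucket(bucket)
            .inspector(inspector)
            .reporter(Reporter.NULL)
            .statementId(stmtId)            // -1 keeps the pre-1.3.x delta_x_y directory name
            .finalDestination(finalDest));  // lets the writer find earlier deltas of the same txn
      }
    }
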
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    index 8864013..3a9e64e 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    @@ -439,13 +439,13 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
          private final FileStatus file;
          private final FileInfo fileInfo;
          private final boolean isOriginal;
    - private final List<Long> deltas;
    + private final List<DeltaMetaData> deltas;
          private final boolean hasBase;

          SplitInfo(Context context, FileSystem fs,
              FileStatus file, FileInfo fileInfo,
              boolean isOriginal,
    - List<Long> deltas,
    + List<DeltaMetaData> deltas,
              boolean hasBase, Path dir, boolean[] covered) throws IOException {
            super(dir, context.numBuckets, deltas, covered);
            this.context = context;
    @@ -467,12 +467,12 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
          FileSystem fs;
          List<FileStatus> files;
          boolean isOriginal;
    - List<Long> deltas;
    + List<DeltaMetaData> deltas;
          Path dir;
          boolean[] covered;

          public ETLSplitStrategy(Context context, FileSystem fs, Path dir, List<FileStatus> children,
    - boolean isOriginal, List<Long> deltas, boolean[] covered) {
    + boolean isOriginal, List<DeltaMetaData> deltas, boolean[] covered) {
            this.context = context;
            this.dir = dir;
            this.fs = fs;
    @@ -543,14 +543,14 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
        static final class BISplitStrategy extends ACIDSplitStrategy {
          List<FileStatus> fileStatuses;
          boolean isOriginal;
    - List<Long> deltas;
    + List<DeltaMetaData> deltas;
          FileSystem fs;
          Context context;
          Path dir;

          public BISplitStrategy(Context context, FileSystem fs,
              Path dir, List<FileStatus> fileStatuses, boolean isOriginal,
    - List<Long> deltas, boolean[] covered) {
    + List<DeltaMetaData> deltas, boolean[] covered) {
            super(dir, context.numBuckets, deltas, covered);
            this.context = context;
            this.fileStatuses = fileStatuses;
    @@ -587,11 +587,11 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
         */
        static class ACIDSplitStrategy implements SplitStrategy<OrcSplit> {
          Path dir;
    - List<Long> deltas;
    + List<DeltaMetaData> deltas;
          boolean[] covered;
          int numBuckets;

    - public ACIDSplitStrategy(Path dir, int numBuckets, List<Long> deltas, boolean[] covered) {
    + public ACIDSplitStrategy(Path dir, int numBuckets, List<DeltaMetaData> deltas, boolean[] covered) {
            this.dir = dir;
            this.numBuckets = numBuckets;
            this.deltas = deltas;
    @@ -640,7 +640,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
            final SplitStrategy splitStrategy;
            AcidUtils.Directory dirInfo = AcidUtils.getAcidState(dir,
                context.conf, context.transactionList);
    - List<Long> deltas = AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
    + List<DeltaMetaData> deltas = AcidUtils.serializeDeltas(dirInfo.getCurrentDirectories());
            Path base = dirInfo.getBaseDirectory();
            List<FileStatus> original = dirInfo.getOriginalFiles();
            boolean[] covered = new boolean[context.numBuckets];
    @@ -718,7 +718,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
          private Metadata metadata;
          private List<OrcProto.Type> types;
          private final boolean isOriginal;
    - private final List<Long> deltas;
    + private final List<DeltaMetaData> deltas;
          private final boolean hasBase;
          private OrcFile.WriterVersion writerVersion;
          private long projColsUncompressedSize;

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
    index da23544..b58c880 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcNewSplit.java
    @@ -24,6 +24,7 @@ import java.nio.ByteBuffer;
      import java.util.ArrayList;
      import java.util.List;

    +import org.apache.hadoop.hive.ql.io.AcidInputFormat;
      import org.apache.hadoop.io.Text;
      import org.apache.hadoop.io.WritableUtils;
      import org.apache.hadoop.mapreduce.lib.input.FileSplit;
    @@ -37,7 +38,7 @@ public class OrcNewSplit extends FileSplit {
        private boolean hasFooter;
        private boolean isOriginal;
        private boolean hasBase;
    - private final List<Long> deltas = new ArrayList<Long>();
    + private final List<AcidInputFormat.DeltaMetaData> deltas = new ArrayList<>();
        private OrcFile.WriterVersion writerVersion;

        protected OrcNewSplit(){
    @@ -67,8 +68,8 @@ public class OrcNewSplit extends FileSplit {
              (hasFooter ? OrcSplit.FOOTER_FLAG : 0);
          out.writeByte(flags);
          out.writeInt(deltas.size());
    - for(Long delta: deltas) {
    - out.writeLong(delta);
    + for(AcidInputFormat.DeltaMetaData delta: deltas) {
    + delta.write(out);
          }
          if (hasFooter) {
            // serialize FileMetaInfo fields
    @@ -101,7 +102,9 @@ public class OrcNewSplit extends FileSplit {
          deltas.clear();
          int numDeltas = in.readInt();
          for(int i=0; i < numDeltas; i++) {
    - deltas.add(in.readLong());
    + AcidInputFormat.DeltaMetaData dmd = new AcidInputFormat.DeltaMetaData();
    + dmd.readFields(in);
    + deltas.add(dmd);
          }
          if (hasFooter) {
            // deserialize FileMetaInfo fields
    @@ -137,7 +140,7 @@ public class OrcNewSplit extends FileSplit {
          return hasBase;
        }

    - public List<Long> getDeltas() {
    + public List<AcidInputFormat.DeltaMetaData> getDeltas() {
          return deltas;
        }
      }

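Because DeltaMetaData is a Writable (the split code above simply delegates to its write()/readFields()), a split now ships the full (minTxn, maxTxn, statement ids) tuple instead of a flat list of longs. A small round-trip sketch, assuming the accessors shown in the AcidUtils/AcidInputFormat changes (class name is illustrative):

    import java.util.Arrays;

    import org.apache.hadoop.hive.ql.io.AcidInputFormat;
    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;

    public class DeltaMetaDataWireSketch {
      public static void main(String[] args) throws Exception {
        AcidInputFormat.DeltaMetaData original =
            new AcidInputFormat.DeltaMetaData(60, 60, Arrays.asList(1, 4, 7));

        DataOutputBuffer out = new DataOutputBuffer();
        original.write(out);                 // same call OrcSplit/OrcNewSplit make in write()

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());

        AcidInputFormat.DeltaMetaData copy = new AcidInputFormat.DeltaMetaData();
        copy.readFields(in);                 // same call the splits make in readFields()
        System.out.println(copy.getMinTxnId() + "_" + copy.getMaxTxnId() + " " + copy.getStmtIds());
      }
    }
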
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
    index 728118a..2f11611 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRawRecordMerger.java
    @@ -72,41 +72,55 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
        /**
         * A RecordIdentifier extended with the current transaction id. This is the
         * key of our merge sort with the originalTransaction, bucket, and rowId
    - * ascending and the currentTransaction descending. This means that if the
    + * ascending and the currentTransaction, statementId descending. This means that if the
         * reader is collapsing events to just the last update, just the first
         * instance of each record is required.
         */
        final static class ReaderKey extends RecordIdentifier{
          private long currentTransactionId;
    + private int statementId;//sort on this descending, like currentTransactionId

          public ReaderKey() {
    - this(-1, -1, -1, -1);
    + this(-1, -1, -1, -1, 0);
          }

          public ReaderKey(long originalTransaction, int bucket, long rowId,
                           long currentTransactionId) {
    + this(originalTransaction, bucket, rowId, currentTransactionId, 0);
    + }
    + /**
    + * @param statementId - set this to 0 if N/A
    + */
    + public ReaderKey(long originalTransaction, int bucket, long rowId,
    + long currentTransactionId, int statementId) {
            super(originalTransaction, bucket, rowId);
            this.currentTransactionId = currentTransactionId;
    + this.statementId = statementId;
          }

          @Override
          public void set(RecordIdentifier other) {
            super.set(other);
            currentTransactionId = ((ReaderKey) other).currentTransactionId;
    + statementId = ((ReaderKey) other).statementId;
          }

          public void setValues(long originalTransactionId,
                                int bucket,
                                long rowId,
    - long currentTransactionId) {
    + long currentTransactionId,
    + int statementId) {
            setValues(originalTransactionId, bucket, rowId);
            this.currentTransactionId = currentTransactionId;
    + this.statementId = statementId;
          }

          @Override
          public boolean equals(Object other) {
            return super.equals(other) &&
    - currentTransactionId == ((ReaderKey) other).currentTransactionId;
    + currentTransactionId == ((ReaderKey) other).currentTransactionId
    + && statementId == ((ReaderKey) other).statementId//consistent with compareTo()
    + ;
          }

          @Override
    @@ -118,6 +132,9 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
                if (currentTransactionId != oth.currentTransactionId) {
                  return currentTransactionId < oth.currentTransactionId ? +1 : -1;
                }
    + if(statementId != oth.statementId) {
    + return statementId < oth.statementId ? +1 : -1;
    + }
              } else {
                return -1;
              }
    @@ -125,6 +142,13 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
            return sup;
          }

    + /**
    + * This means 1 txn modified the same row more than once
    + */
    + private boolean isSameRow(ReaderKey other) {
    + return compareRow(other) == 0 && currentTransactionId == other.currentTransactionId;
    + }
    +
          public long getCurrentTransactionId() {
            return currentTransactionId;
          }
    @@ -142,7 +166,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
          public String toString() {
            return "{originalTxn: " + getTransactionId() + ", bucket: " +
                getBucketId() + ", row: " + getRowId() + ", currentTxn: " +
    - currentTransactionId + "}";
    + currentTransactionId + ", statementId: "+ statementId + "}";
          }
        }

    @@ -159,6 +183,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
          final ReaderKey key;
          final RecordIdentifier maxKey;
          final int bucket;
    + private final int statementId;

          /**
           * Create a reader that reads from the first key larger than minKey to any
    @@ -170,17 +195,19 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
           * @param maxKey only return keys less than or equal to maxKey if it is
           * non-null
           * @param options options to provide to read the rows.
    + * @param statementId id of SQL statement within a transaction
           * @throws IOException
           */
          ReaderPair(ReaderKey key, Reader reader, int bucket,
                     RecordIdentifier minKey, RecordIdentifier maxKey,
    - ReaderImpl.Options options) throws IOException {
    + ReaderImpl.Options options, int statementId) throws IOException {
            this.reader = reader;
            this.key = key;
            this.maxKey = maxKey;
            this.bucket = bucket;
            // TODO use stripe statistics to jump over stripes
            recordReader = reader.rowsOptions(options);
    + this.statementId = statementId;
            // advance the reader until we reach the minimum key
            do {
              next(nextRecord);
    @@ -195,7 +222,8 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
              key.setValues(OrcRecordUpdater.getOriginalTransaction(nextRecord),
                  OrcRecordUpdater.getBucket(nextRecord),
                  OrcRecordUpdater.getRowId(nextRecord),
    - OrcRecordUpdater.getCurrentTransaction(nextRecord));
    + OrcRecordUpdater.getCurrentTransaction(nextRecord),
    + statementId);

              // if this record is larger than maxKey, we need to stop
              if (maxKey != null && key.compareRow(maxKey) > 0) {
    @@ -223,7 +251,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
          OriginalReaderPair(ReaderKey key, Reader reader, int bucket,
                             RecordIdentifier minKey, RecordIdentifier maxKey,
                             Reader.Options options) throws IOException {
    - super(key, reader, bucket, minKey, maxKey, options);
    + super(key, reader, bucket, minKey, maxKey, options, 0);
          }

          @Override
    @@ -263,7 +291,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
                nextRecord.setFieldValue(OrcRecordUpdater.ROW,
                    recordReader.next(OrcRecordUpdater.getRow(next)));
              }
    - key.setValues(0L, bucket, nextRowId, 0L);
    + key.setValues(0L, bucket, nextRowId, 0L, 0);
              if (maxKey != null && key.compareRow(maxKey) > 0) {
                if (LOG.isDebugEnabled()) {
                  LOG.debug("key " + key + " > maxkey " + maxKey);
    @@ -415,7 +443,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
          this.offset = options.getOffset();
          this.length = options.getLength();
          this.validTxnList = validTxnList;
    - // modify the optins to reflect the event instead of the base row
    + // modify the options to reflect the event instead of the base row
          Reader.Options eventOptions = createEventOptions(options);
          if (reader == null) {
            baseReader = null;
    @@ -438,7 +466,7 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
                                            options);
            } else {
              pair = new ReaderPair(key, reader, bucket, minKey, maxKey,
    - eventOptions);
    + eventOptions, 0);
            }

            // if there is at least one record, put it in the map
    @@ -458,13 +486,14 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
            for(Path delta: deltaDirectory) {
              ReaderKey key = new ReaderKey();
              Path deltaFile = AcidUtils.createBucketFile(delta, bucket);
    + AcidUtils.ParsedDelta deltaDir = AcidUtils.parsedDelta(delta);
              FileSystem fs = deltaFile.getFileSystem(conf);
              long length = getLastFlushLength(fs, deltaFile);
              if (length != -1 && fs.exists(deltaFile)) {
                Reader deltaReader = OrcFile.createReader(deltaFile,
                    OrcFile.readerOptions(conf).maxLength(length));
                ReaderPair deltaPair = new ReaderPair(key, deltaReader, bucket, minKey,
    - maxKey, eventOptions);
    + maxKey, eventOptions, deltaDir.getStatementId());
                if (deltaPair.nextRecord != null) {
                  readers.put(key, deltaPair);
                }
    @@ -580,9 +609,18 @@ public class OrcRawRecordMerger implements AcidInputFormat.RawReader<OrcStruct>{
              continue;
            }

    + /*for multi-statement txns, you may have multiple events for the same
    + * row in the same (current) transaction. We want to collapse these to just the last one
    + * regardless of whether we are minor compacting. Consider INSERT/UPDATE/UPDATE of the
    + * same row in the same txn. There is no benefit to passing along anything except the last
    + * event. If we did want to pass it along, we'd have to include statementId in the row
    + * returned so that compaction could write it out, or make minor compaction understand
    + * how to write out delta files in delta_xxx_yyy_stid format. There doesn't seem to be any
    + * value in this.*/
    + boolean isSameRow = prevKey.isSameRow((ReaderKey)recordIdentifier);
            // if we are collapsing, figure out if this is a new row
    - if (collapse) {
    - keysSame = prevKey.compareRow(recordIdentifier) == 0;
    + if (collapse || isSameRow) {
    + keysSame = (collapse && prevKey.compareRow(recordIdentifier) == 0) || (isSameRow);
              if (!keysSame) {
                prevKey.set(recordIdentifier);
              }

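The ReaderKey change is what makes statement order matter during the merge: rows sort ascending on (originalTxn, bucket, rowId) and descending on (currentTxn, statementId), so when one transaction touched the same row in several statements the event from the latest statement surfaces first and the collapse logic keeps only that one. A same-package sketch of that ordering (ReaderKey is package-private, as in TestOrcRawRecordMerger):

    package org.apache.hadoop.hive.ql.io.orc;

    import org.apache.hadoop.hive.ql.io.orc.OrcRawRecordMerger.ReaderKey;

    public class ReaderKeyOrderSketch {
      public static void main(String[] args) {
        // Same row (originalTxn 10, bucket 20, rowId 30) touched twice in txn 100.
        ReaderKey stmt0 = new ReaderKey(10, 20, 30, 100, 0);
        ReaderKey stmt2 = new ReaderKey(10, 20, 30, 100, 2);

        // statementId sorts descending, so the later statement's event comes first
        // and a collapsing reader treats the statement-0 event as superseded.
        System.out.println(stmt2.compareTo(stmt0) < 0);   // true
        System.out.println(stmt0.compareTo(stmt2) > 0);   // true
      }
    }
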
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
    index b576496..e4651b8 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java
    @@ -89,6 +89,7 @@ public class OrcRecordUpdater implements RecordUpdater {
        private final IntWritable bucket = new IntWritable();
        private final LongWritable rowId = new LongWritable();
        private long insertedRows = 0;
    + private long rowIdOffset = 0;
        // This records how many rows have been inserted or deleted. It is separate from insertedRows
        // because that is monotonically increasing to give new unique row ids.
        private long rowCountDelta = 0;
    @@ -263,6 +264,41 @@ public class OrcRecordUpdater implements RecordUpdater {
          item.setFieldValue(ROW_ID, rowId);
        }

    + /**
    + * To handle multiple INSERT... statements in a single transaction, we want to make sure
    + * to generate unique {@code rowId} for all inserted rows of the transaction.
    + * @return the largest rowId created by previous statements (may be 0)
    + * @throws IOException
    + */
    + private long findRowIdOffsetForInsert() throws IOException {
    + /*
    + * 1. need to know bucket we are writing to
    + * 2. need to know which delta dir it's in
    + * Then,
    + * 1. find the same bucket file in previous delta dir for this txn
    + * 2. read the footer and get AcidStats which has insert count
    + * 2.1 if AcidStats.inserts>0 done
    + * else go to previous delta file
    + * For example, consider insert/update/insert case...*/
    + if(options.getStatementId() <= 0) {
    + return 0;//there is only 1 statement in this transaction (so far)
    + }
    + for(int pastStmt = options.getStatementId() - 1; pastStmt >= 0; pastStmt--) {
    + Path matchingBucket = AcidUtils.createFilename(options.getFinalDestination(), options.clone().statementId(pastStmt));
    + if(!fs.exists(matchingBucket)) {
    + continue;
    + }
    + Reader reader = OrcFile.createReader(matchingBucket, OrcFile.readerOptions(options.getConfiguration()));
    + //no close() on Reader?!
    + AcidStats acidStats = parseAcidStats(reader);
    + if(acidStats.inserts > 0) {
    + return acidStats.inserts;
    + }
    + }
    + //if we got here, we looked at all delta files in this txn, prior to current statement and didn't
    + //find any inserts...
    + return 0;
    + }
        // Find the record identifier column (if there) and return a possibly new ObjectInspector that
        // will strain out the record id for the underlying writer.
        private ObjectInspector findRecId(ObjectInspector inspector, int rowIdColNum) {
    @@ -304,6 +340,9 @@ public class OrcRecordUpdater implements RecordUpdater {
                recIdInspector.getStructFieldData(rowIdValue, originalTxnField));
            rowId = rowIdInspector.get(recIdInspector.getStructFieldData(rowIdValue, rowIdField));
          }
    + else if(operation == INSERT_OPERATION) {
    + rowId += rowIdOffset;
    + }
          this.rowId.set(rowId);
          this.originalTransaction.set(originalTransaction);
          item.setFieldValue(OrcRecordUpdater.ROW, (operation == DELETE_OPERATION ? null : row));
    @@ -315,6 +354,9 @@ public class OrcRecordUpdater implements RecordUpdater {
        public void insert(long currentTransaction, Object row) throws IOException {
          if (this.currentTransaction.get() != currentTransaction) {
            insertedRows = 0;
    + //this call is almost a no-op in the hcatalog.streaming case since statementId == 0 is
    + //always true in that case
    + rowIdOffset = findRowIdOffsetForInsert();
          }
          addEvent(INSERT_OPERATION, currentTransaction, insertedRows++, row);
          rowCountDelta++;
    @@ -407,6 +449,22 @@ public class OrcRecordUpdater implements RecordUpdater {
          }
          return result;
        }
    + /**
    + * {@link KeyIndexBuilder} creates these
    + */
    + static AcidStats parseAcidStats(Reader reader) {
    + String statsSerialized;
    + try {
    + ByteBuffer val =
    + reader.getMetadataValue(OrcRecordUpdater.ACID_STATS)
    + .duplicate();
    + statsSerialized = utf8Decoder.decode(val).toString();
    + } catch (CharacterCodingException e) {
    + throw new IllegalArgumentException("Bad string encoding for " +
    + OrcRecordUpdater.ACID_STATS, e);
    + }
    + return new AcidStats(statsSerialized);
    + }

        static class KeyIndexBuilder implements OrcFile.WriterCallback {
          StringBuilder lastKey = new StringBuilder();

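To make the rowId offset concrete: if statement 0 of txn 7 inserted two rows (rowIds 0 and 1), then when statement 1 of the same transaction starts inserting, findRowIdOffsetForInsert() walks back to delta_0000007_0000007_0000, reads AcidStats out of the ORC footer metadata and offsets the new rowIds to start at 2, so (originalTxn, bucket, rowId) stays unique across the whole transaction. A sketch in the style of TestOrcRecordUpdater, assuming a Configuration conf, FileSystem fs, ObjectInspector inspector, Path root and a simple MyRow test struct are already set up as in that test:

    AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
        .filesystem(fs).bucket(0).inspector(inspector)
        .writingBase(false).minimumTransactionId(7).maximumTransactionId(7)
        .reporter(Reporter.NULL).finalDestination(root).statementId(0);

    RecordUpdater stmt0 = new OrcRecordUpdater(root, options);
    stmt0.insert(7, new MyRow("first"));     // rowId 0
    stmt0.insert(7, new MyRow("second"));    // rowId 1
    stmt0.close(false);

    // Second statement of the same transaction: findRowIdOffsetForInsert() finds the
    // two inserts recorded in delta_0000007_0000007_0000 and starts at rowId 2.
    RecordUpdater stmt1 = new OrcRecordUpdater(root, options.clone().statementId(1));
    stmt1.insert(7, new MyRow("third"));     // rowId 2
    stmt1.close(false);
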
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
    index 0c7dd40..8cf4cc0 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSplit.java
    @@ -26,6 +26,8 @@ import java.util.ArrayList;
      import java.util.List;

      import org.apache.hadoop.fs.Path;
    +import org.apache.hadoop.hive.ql.io.AcidInputFormat;
    +import org.apache.hadoop.hive.ql.io.AcidUtils;
      import org.apache.hadoop.io.Text;
      import org.apache.hadoop.io.WritableUtils;
      import org.apache.hadoop.mapred.FileSplit;
    @@ -41,7 +43,7 @@ public class OrcSplit extends FileSplit {
        private boolean hasFooter;
        private boolean isOriginal;
        private boolean hasBase;
    - private final List<Long> deltas = new ArrayList<Long>();
    + private final List<AcidInputFormat.DeltaMetaData> deltas = new ArrayList<>();
        private OrcFile.WriterVersion writerVersion;
        private long projColsUncompressedSize;

    @@ -58,7 +60,7 @@ public class OrcSplit extends FileSplit {

        public OrcSplit(Path path, long offset, long length, String[] hosts,
            ReaderImpl.FileMetaInfo fileMetaInfo, boolean isOriginal, boolean hasBase,
    - List<Long> deltas, long projectedDataSize) {
    + List<AcidInputFormat.DeltaMetaData> deltas, long projectedDataSize) {
          super(path, offset, length, hosts);
          this.fileMetaInfo = fileMetaInfo;
          hasFooter = this.fileMetaInfo != null;
    @@ -78,8 +80,8 @@ public class OrcSplit extends FileSplit {
              (hasFooter ? FOOTER_FLAG : 0);
          out.writeByte(flags);
          out.writeInt(deltas.size());
    - for(Long delta: deltas) {
    - out.writeLong(delta);
    + for(AcidInputFormat.DeltaMetaData delta: deltas) {
    + delta.write(out);
          }
          if (hasFooter) {
            // serialize FileMetaInfo fields
    @@ -112,7 +114,9 @@ public class OrcSplit extends FileSplit {
          deltas.clear();
          int numDeltas = in.readInt();
          for(int i=0; i < numDeltas; i++) {
    - deltas.add(in.readLong());
    + AcidInputFormat.DeltaMetaData dmd = new AcidInputFormat.DeltaMetaData();
    + dmd.readFields(in);
    + deltas.add(dmd);
          }
          if (hasFooter) {
            // deserialize FileMetaInfo fields
    @@ -148,7 +152,7 @@ public class OrcSplit extends FileSplit {
          return hasBase;
        }

    - public List<Long> getDeltas() {
    + public List<AcidInputFormat.DeltaMetaData> getDeltas() {
          return deltas;
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    index f8fff1a..445f606 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
    @@ -52,6 +52,14 @@ public class DbTxnManager extends HiveTxnManagerImpl {
        private DbLockManager lockMgr = null;
        private IMetaStoreClient client = null;
        private long txnId = 0;
    + /**
    + * assigns a unique monotonically increasing ID to each statement
    + * which is part of an open transaction. This is used by the storage
    + * layer (see {@link org.apache.hadoop.hive.ql.io.AcidUtils#deltaSubdir(long, long, int)})
    + * to keep apart multiple writes of the same data within the same transaction.
    + * Also see {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options}
    + */
    + private int statementId = -1;

        DbTxnManager() {
        }
    @@ -69,6 +77,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
          init();
          try {
            txnId = client.openTxn(user);
    + statementId = 0;
            LOG.debug("Opened txn " + txnId);
            return txnId;
          } catch (TException e) {
    @@ -222,7 +231,10 @@ public class DbTxnManager extends HiveTxnManagerImpl {
            return null;
          }

    - List<HiveLock> locks = new ArrayList<HiveLock>(1);
    + List<HiveLock> locks = new ArrayList<HiveLock>(1);
    + if(txnId > 0) {
    + statementId++;
    + }
          LockState lockState = lockMgr.lock(rqstBuilder.build(), plan.getQueryId(), isBlocking, locks);
          ctx.setHiveLocks(locks);
          return lockState;
    @@ -249,6 +261,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
                e);
          } finally {
            txnId = 0;
    + statementId = -1;
          }
        }

    @@ -270,6 +283,7 @@ public class DbTxnManager extends HiveTxnManagerImpl {
                e);
          } finally {
            txnId = 0;
    + statementId = -1;
          }
        }

    @@ -361,5 +375,9 @@ public class DbTxnManager extends HiveTxnManagerImpl {
            }
          }
        }
    + @Override
    + public int getStatementId() {
    + return statementId;
    + }

      }

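On the transaction-manager side the contract is simple: openTxn() resets statementId to 0, each acquireLocks() call while a transaction is open bumps it, and commit/rollback drop it back to -1; getStatementId() exposes the current value. A hypothetical wiring sketch (the helper class, method and call site are illustrative; the patch itself only adds the accessors it uses):

    import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
    import org.apache.hadoop.hive.ql.plan.FileSinkDesc;

    public class StatementIdWiringSketch {
      // Stamps the current statement id onto a FileSinkDesc so it can later be passed to
      // AcidOutputFormat.Options.statementId(int); getStatementId() is -1 when no
      // transaction is open, which maps to the old delta_x_y directory name.
      static void stampStatementId(HiveTxnManager txnMgr, FileSinkDesc desc) {
        desc.setStatementId(txnMgr.getStatementId());
      }
    }
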
    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
    index 21ab8ee..1906982 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DummyTxnManager.java
    @@ -54,6 +54,10 @@ class DummyTxnManager extends HiveTxnManagerImpl {
        }

        @Override
    + public int getStatementId() {
    + return 0;
    + }
    + @Override
        public HiveLockManager getLockManager() throws LockException {
          if (lockMgr == null) {
            boolean supportConcurrency =

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
    index 2dd0c7d..6c3dc33 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/HiveTxnManager.java
    @@ -127,4 +127,7 @@ public interface HiveTxnManager {
         * @return true if this transaction manager does ACID
         */
        boolean supportsAcid();
    +
    + int getStatementId();
    +
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    index b02374e..8516631 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    @@ -6605,7 +6605,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
            rsCtx.getNumFiles(),
            rsCtx.getTotalFiles(),
            rsCtx.getPartnCols(),
    - dpCtx);
    + dpCtx,
    + dest_path);

          // If this is an insert, update, or delete on an ACID table then mark that so the
          // FileSinkOperator knows how to properly write to it.

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    index bb6cee5..f73b502 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    @@ -92,16 +92,21 @@ public class FileSinkDesc extends AbstractOperatorDesc {
        // Record what type of write this is. Default is non-ACID (ie old style).
        private AcidUtils.Operation writeType = AcidUtils.Operation.NOT_ACID;
        private long txnId = 0; // transaction id for this operation
    + private int statementId = -1;

        private transient Table table;
    + private Path destPath;

        public FileSinkDesc() {
        }

    + /**
    + * @param destPath - the final destination for data
    + */
        public FileSinkDesc(final Path dirName, final TableDesc tableInfo,
            final boolean compressed, final int destTableId, final boolean multiFileSpray,
            final boolean canBeMerged, final int numFiles, final int totalFiles,
    - final ArrayList<ExprNodeDesc> partitionCols, final DynamicPartitionCtx dpCtx) {
    + final ArrayList<ExprNodeDesc> partitionCols, final DynamicPartitionCtx dpCtx, Path destPath) {

          this.dirName = dirName;
          this.tableInfo = tableInfo;
    @@ -114,6 +119,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
          this.partitionCols = partitionCols;
          this.dpCtx = dpCtx;
          this.dpSortState = DPSortState.NONE;
    + this.destPath = destPath;
        }

        public FileSinkDesc(final Path dirName, final TableDesc tableInfo,
    @@ -135,7 +141,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
        public Object clone() throws CloneNotSupportedException {
          FileSinkDesc ret = new FileSinkDesc(dirName, tableInfo, compressed,
              destTableId, multiFileSpray, canBeMerged, numFiles, totalFiles,
    - partitionCols, dpCtx);
    + partitionCols, dpCtx, destPath);
          ret.setCompressCodec(compressCodec);
          ret.setCompressType(compressType);
          ret.setGatherStats(gatherStats);
    @@ -231,9 +237,6 @@ public class FileSinkDesc extends AbstractOperatorDesc {
          return temporary;
        }

    - /**
    - * @param totalFiles the totalFiles to set
    - */
        public void setTemporary(boolean temporary) {
          this.temporary = temporary;
        }
    @@ -438,11 +441,23 @@ public class FileSinkDesc extends AbstractOperatorDesc {
        public void setTransactionId(long id) {
          txnId = id;
        }
    -
        public long getTransactionId() {
          return txnId;
        }

    + public void setStatementId(int id) {
    + statementId = id;
    + }
    + /**
    + * See {@link org.apache.hadoop.hive.ql.io.AcidOutputFormat.Options#statementId(int)}
    + */
    + public int getStatementId() {
    + return statementId;
    + }
    + public Path getDestPath() {
    + return destPath;
    + }
    +
        public Table getTable() {
          return table;
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
    index c5f2d4d..6c77ba4 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java
    @@ -545,7 +545,9 @@ public class CompactorMR {
                  .reporter(reporter)
                  .minimumTransactionId(jobConf.getLong(MIN_TXN, Long.MAX_VALUE))
                  .maximumTransactionId(jobConf.getLong(MAX_TXN, Long.MIN_VALUE))
    - .bucket(bucket);
    + .bucket(bucket)
    + .statementId(-1);//setting statementId == -1 makes compacted delta files use
    + //delta_xxxx_yyyy format

              // Instantiate the underlying output format
              @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
    index e400778..c6ae030 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java
    @@ -303,7 +303,8 @@ public class TestFileSinkOperator {
            Map<String, String> partColNames = new HashMap<String, String>(1);
            partColNames.put(PARTCOL_NAME, PARTCOL_NAME);
            dpCtx.setInputToDPCols(partColNames);
    - desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx);
    + //todo: does this need the finalDestination?
    + desc = new FileSinkDesc(basePath, tableDesc, false, 1, false, false, 1, 1, partCols, dpCtx, null);
          } else {
            desc = new FileSinkDesc(basePath, tableDesc, false);
          }

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
    index 1e3df34..f8ded12 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidUtils.java
    @@ -46,17 +46,23 @@ public class TestAcidUtils {
              AcidUtils.createFilename(p, options).toString());
          options.bucket(123);
          assertEquals("/tmp/00123_0",
    - AcidUtils.createFilename(p, options).toString());
    + AcidUtils.createFilename(p, options).toString());
          options.bucket(23)
              .minimumTransactionId(100)
              .maximumTransactionId(200)
              .writingBase(true)
              .setOldStyle(false);
          assertEquals("/tmp/base_0000200/bucket_00023",
    - AcidUtils.createFilename(p, options).toString());
    + AcidUtils.createFilename(p, options).toString());
          options.writingBase(false);
    + assertEquals("/tmp/delta_0000100_0000200_0000/bucket_00023",
    + AcidUtils.createFilename(p, options).toString());
    + options.statementId(-1);
          assertEquals("/tmp/delta_0000100_0000200/bucket_00023",
    - AcidUtils.createFilename(p, options).toString());
    + AcidUtils.createFilename(p, options).toString());
    + options.statementId(7);
    + assertEquals("/tmp/delta_0000100_0000200_0007/bucket_00023",
    + AcidUtils.createFilename(p, options).toString());
        }

        @Test
    @@ -236,7 +242,6 @@ public class TestAcidUtils {
              new MockFile("mock:/tbl/part1/delta_40_60/bucket_0", 500, new byte[0]),
              new MockFile("mock:/tbl/part1/delta_0060_60/bucket_0", 500, new byte[0]),
              new MockFile("mock:/tbl/part1/delta_052_55/bucket_0", 500, new byte[0]),
    - new MockFile("mock:/tbl/part1/delta_40_60/bucket_0", 500, new byte[0]),
              new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0]));
          Path part = new MockPath(fs, "mock:/tbl/part1");
          AcidUtils.Directory dir =
    @@ -254,6 +259,45 @@ public class TestAcidUtils {
          assertEquals("mock:/tbl/part1/delta_0000063_63", delts.get(3).getPath().toString());
        }

    + /**
    + * Hive 1.3.0 delta dir naming scheme which supports multi-statement txns
    + * @throws Exception
    + */
    + @Test
    + public void testOverlapingDelta2() throws Exception {
    + Configuration conf = new Configuration();
    + MockFileSystem fs = new MockFileSystem(conf,
    + new MockFile("mock:/tbl/part1/delta_0000063_63_0/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_000062_62_0/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_000062_62_3/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_00061_61_0/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_40_60/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_0060_60_1/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_0060_60_4/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_0060_60_7/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_052_55/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_058_58/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/base_50/bucket_0", 500, new byte[0]));
    + Path part = new MockPath(fs, "mock:/tbl/part1");
    + AcidUtils.Directory dir =
    + AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:"));
    + assertEquals("mock:/tbl/part1/base_50", dir.getBaseDirectory().toString());
    + List<FileStatus> obsolete = dir.getObsolete();
    + assertEquals(5, obsolete.size());
    + assertEquals("mock:/tbl/part1/delta_052_55", obsolete.get(0).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_058_58", obsolete.get(1).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_0060_60_1", obsolete.get(2).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_0060_60_4", obsolete.get(3).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_0060_60_7", obsolete.get(4).getPath().toString());
    + List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
    + assertEquals(5, delts.size());
    + assertEquals("mock:/tbl/part1/delta_40_60", delts.get(0).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_00061_61_0", delts.get(1).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_000062_62_0", delts.get(2).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_000062_62_3", delts.get(3).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_0000063_63_0", delts.get(4).getPath().toString());
    + }
    +
        @Test
        public void deltasWithOpenTxnInRead() throws Exception {
          Configuration conf = new Configuration();
    @@ -268,6 +312,27 @@ public class TestAcidUtils {
          assertEquals("mock:/tbl/part1/delta_2_5", delts.get(1).getPath().toString());
        }

    + /**
    + * @since 1.3.0
    + * @throws Exception
    + */
    + @Test
    + public void deltasWithOpenTxnInRead2() throws Exception {
    + Configuration conf = new Configuration();
    + MockFileSystem fs = new MockFileSystem(conf,
    + new MockFile("mock:/tbl/part1/delta_1_1/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_2_5/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_4_4_1/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_4_4_3/bucket_0", 500, new byte[0]),
    + new MockFile("mock:/tbl/part1/delta_101_101_1/bucket_0", 500, new byte[0]));
    + Path part = new MockPath(fs, "mock:/tbl/part1");
    + AcidUtils.Directory dir = AcidUtils.getAcidState(part, conf, new ValidReadTxnList("100:4"));
    + List<AcidUtils.ParsedDelta> delts = dir.getCurrentDirectories();
    + assertEquals(2, delts.size());
    + assertEquals("mock:/tbl/part1/delta_1_1", delts.get(0).getPath().toString());
    + assertEquals("mock:/tbl/part1/delta_2_5", delts.get(1).getPath().toString());
    + }
    +
        @Test
        public void deltasWithOpenTxnsNotInCompact() throws Exception {
          Configuration conf = new Configuration();

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    index 56e5f9f..e96ab2a 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java
    @@ -59,6 +59,7 @@ import org.apache.hadoop.hive.ql.exec.vector.DecimalColumnVector;
      import org.apache.hadoop.hive.ql.exec.vector.DoubleColumnVector;
      import org.apache.hadoop.hive.ql.exec.vector.LongColumnVector;
      import org.apache.hadoop.hive.ql.exec.vector.VectorizedRowBatch;
    +import org.apache.hadoop.hive.ql.io.AcidInputFormat;
      import org.apache.hadoop.hive.ql.io.AcidOutputFormat;
      import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
      import org.apache.hadoop.hive.ql.io.HiveInputFormat;
    @@ -927,7 +928,7 @@ public class TestInputOutputFormat {
          OrcInputFormat.SplitGenerator splitter =
              new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
                  fs.getFileStatus(new Path("/a/file")), null, true,
    - new ArrayList<Long>(), true, null, null));
    + new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null));
          OrcSplit result = splitter.createSplit(0, 200, null);
          assertEquals(0, result.getStart());
          assertEquals(200, result.getLength());
    @@ -968,7 +969,7 @@ public class TestInputOutputFormat {
          OrcInputFormat.SplitGenerator splitter =
              new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
                  fs.getFileStatus(new Path("/a/file")), null, true,
    - new ArrayList<Long>(), true, null, null));
    + new ArrayList<AcidInputFormat.DeltaMetaData>(), true, null, null));
          List<OrcSplit> results = splitter.call();
          OrcSplit result = results.get(0);
          assertEquals(3, result.getStart());
    @@ -990,7 +991,7 @@ public class TestInputOutputFormat {
          conf.setInt(OrcInputFormat.MAX_SPLIT_SIZE, 0);
          context = new OrcInputFormat.Context(conf);
          splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs,
    - fs.getFileStatus(new Path("/a/file")), null, true, new ArrayList<Long>(),
    + fs.getFileStatus(new Path("/a/file")), null, true, new ArrayList<AcidInputFormat.DeltaMetaData>(),
              true, null, null));
          results = splitter.call();
          for(int i=0; i < stripeSizes.length; ++i) {
    @@ -1497,7 +1498,7 @@ public class TestInputOutputFormat {
          Path partDir = new Path(conf.get("mapred.input.dir"));
          OrcRecordUpdater writer = new OrcRecordUpdater(partDir,
              new AcidOutputFormat.Options(conf).maximumTransactionId(10)
    - .writingBase(true).bucket(0).inspector(inspector));
    + .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir));
          for(int i=0; i < 100; ++i) {
            BigRow row = new BigRow(i);
            writer.insert(10, row);
    @@ -1648,7 +1649,7 @@ public class TestInputOutputFormat {
          // write a base file in partition 0
          OrcRecordUpdater writer = new OrcRecordUpdater(partDir[0],
              new AcidOutputFormat.Options(conf).maximumTransactionId(10)
    - .writingBase(true).bucket(0).inspector(inspector));
    + .writingBase(true).bucket(0).inspector(inspector).finalDestination(partDir[0]));
          for(int i=0; i < 10; ++i) {
            writer.insert(10, new MyRow(i, 2 * i));
          }
    @@ -1661,7 +1662,7 @@ public class TestInputOutputFormat {
          // write a delta file in partition 0
          writer = new OrcRecordUpdater(partDir[0],
              new AcidOutputFormat.Options(conf).maximumTransactionId(10)
    - .writingBase(true).bucket(1).inspector(inspector));
    + .writingBase(true).bucket(1).inspector(inspector).finalDestination(partDir[0]));
          for(int i=10; i < 20; ++i) {
            writer.insert(10, new MyRow(i, 2*i));
          }

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
    index 921e954..39f71f1 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java
    @@ -62,12 +62,12 @@ import static org.junit.Assert.assertNull;
      public class TestOrcRawRecordMerger {

        private static final Log LOG = LogFactory.getLog(TestOrcRawRecordMerger.class);
    -
    +//todo: why is statementId -1?
        @Test
        public void testOrdering() throws Exception {
          ReaderKey left = new ReaderKey(100, 200, 1200, 300);
          ReaderKey right = new ReaderKey();
    - right.setValues(100, 200, 1000, 200);
    + right.setValues(100, 200, 1000, 200,1);
          assertTrue(right.compareTo(left) < 0);
          assertTrue(left.compareTo(right) > 0);
          assertEquals(false, left.equals(right));
    @@ -76,16 +76,16 @@ public class TestOrcRawRecordMerger {
          assertEquals(true, right.equals(left));
          right.setRowId(2000);
          assertTrue(right.compareTo(left) > 0);
    - left.setValues(1, 2, 3, 4);
    - right.setValues(100, 2, 3, 4);
    + left.setValues(1, 2, 3, 4,-1);
    + right.setValues(100, 2, 3, 4,-1);
          assertTrue(left.compareTo(right) < 0);
          assertTrue(right.compareTo(left) > 0);
    - left.setValues(1, 2, 3, 4);
    - right.setValues(1, 100, 3, 4);
    + left.setValues(1, 2, 3, 4,-1);
    + right.setValues(1, 100, 3, 4,-1);
          assertTrue(left.compareTo(right) < 0);
          assertTrue(right.compareTo(left) > 0);
    - left.setValues(1, 2, 3, 100);
    - right.setValues(1, 2, 3, 4);
    + left.setValues(1, 2, 3, 100,-1);
    + right.setValues(1, 2, 3, 4,-1);
          assertTrue(left.compareTo(right) < 0);
          assertTrue(right.compareTo(left) > 0);

    @@ -177,7 +177,7 @@ public class TestOrcRawRecordMerger {
          RecordIdentifier minKey = new RecordIdentifier(10, 20, 30);
          RecordIdentifier maxKey = new RecordIdentifier(40, 50, 60);
          ReaderPair pair = new ReaderPair(key, reader, 20, minKey, maxKey,
    - new Reader.Options());
    + new Reader.Options(), 0);
          RecordReader recordReader = pair.recordReader;
          assertEquals(10, key.getTransactionId());
          assertEquals(20, key.getBucketId());
    @@ -203,7 +203,7 @@ public class TestOrcRawRecordMerger {
          Reader reader = createMockReader();

          ReaderPair pair = new ReaderPair(key, reader, 20, null, null,
    - new Reader.Options());
    + new Reader.Options(), 0);
          RecordReader recordReader = pair.recordReader;
          assertEquals(10, key.getTransactionId());
          assertEquals(20, key.getBucketId());
    @@ -489,7 +489,7 @@ public class TestOrcRawRecordMerger {
          // write the empty base
          AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
              .inspector(inspector).bucket(BUCKET).writingBase(true)
    - .maximumTransactionId(100);
    + .maximumTransactionId(100).finalDestination(root);
          of.getRecordUpdater(root, options).close(false);

          ValidTxnList txnList = new ValidReadTxnList("200:");
    @@ -515,6 +515,10 @@ public class TestOrcRawRecordMerger {
         */
        @Test
        public void testNewBaseAndDelta() throws Exception {
    + testNewBaseAndDelta(false);
    + testNewBaseAndDelta(true);
    + }
    + private void testNewBaseAndDelta(boolean use130Format) throws Exception {
          final int BUCKET = 10;
          String[] values = new String[]{"first", "second", "third", "fourth",
                                         "fifth", "sixth", "seventh", "eighth",
    @@ -532,7 +536,10 @@ public class TestOrcRawRecordMerger {

          // write the base
          AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
    - .inspector(inspector).bucket(BUCKET);
    + .inspector(inspector).bucket(BUCKET).finalDestination(root);
    + if(!use130Format) {
    + options.statementId(-1);
    + }
          RecordUpdater ru = of.getRecordUpdater(root,
              options.writingBase(true).maximumTransactionId(100));
          for(String v: values) {
    @@ -554,7 +561,8 @@ public class TestOrcRawRecordMerger {
          AcidUtils.Directory directory = AcidUtils.getAcidState(root, conf, txnList);

          assertEquals(new Path(root, "base_0000100"), directory.getBaseDirectory());
    - assertEquals(new Path(root, "delta_0000200_0000200"),
    + assertEquals(new Path(root, use130Format ?
    + AcidUtils.deltaSubdir(200,200,0) : AcidUtils.deltaSubdir(200,200)),
              directory.getCurrentDirectories().get(0).getPath());

          Path basePath = AcidUtils.createBucketFile(directory.getBaseDirectory(),
    @@ -829,7 +837,7 @@ public class TestOrcRawRecordMerger {
          // write a delta
          AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf)
              .writingBase(false).minimumTransactionId(1).maximumTransactionId(1)
    - .bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5);
    + .bucket(BUCKET).inspector(inspector).filesystem(fs).recordIdColumn(5).finalDestination(root);
          RecordUpdater ru = of.getRecordUpdater(root, options);
          values = new String[]{"0.0", null, null, "1.1", null, null, null,
              "ignore.7"};
    @@ -920,6 +928,7 @@ public class TestOrcRawRecordMerger {
          options.orcOptions(OrcFile.writerOptions(conf)
            .stripeSize(1).blockPadding(false).compress(CompressionKind.NONE)
            .memory(mgr));
    + options.finalDestination(root);
          RecordUpdater ru = of.getRecordUpdater(root, options);
          String[] values= new String[]{"ignore.1", "0.1", "ignore.2", "ignore.3",
              "2.0", "2.1", "3.0", "ignore.4", "ignore.5", "ignore.6"};
    @@ -1004,7 +1013,8 @@ public class TestOrcRawRecordMerger {
          AcidOutputFormat.Options options =
              new AcidOutputFormat.Options(conf)
                  .bucket(BUCKET).inspector(inspector).filesystem(fs)
    - .writingBase(false).minimumTransactionId(1).maximumTransactionId(1);
    + .writingBase(false).minimumTransactionId(1).maximumTransactionId(1)
    + .finalDestination(root);
          RecordUpdater ru = of.getRecordUpdater(root, options);
          String[] values = new String[]{"a", "b", "c", "d", "e"};
          for(int i=0; i < values.length; ++i) {
    @@ -1047,6 +1057,14 @@ public class TestOrcRawRecordMerger {
         */
        @Test
        public void testRecordReaderIncompleteDelta() throws Exception {
    + testRecordReaderIncompleteDelta(false);
    + testRecordReaderIncompleteDelta(true);
    + }
    + /**
    + *
    + * @param use130Format true means use delta_0001_0001_0000 format, else delta_0001_00001
    + */
    + private void testRecordReaderIncompleteDelta(boolean use130Format) throws Exception {
          final int BUCKET = 1;
          Configuration conf = new Configuration();
          OrcOutputFormat of = new OrcOutputFormat();
    @@ -1063,7 +1081,10 @@ public class TestOrcRawRecordMerger {
          AcidOutputFormat.Options options =
              new AcidOutputFormat.Options(conf)
                  .writingBase(true).minimumTransactionId(0).maximumTransactionId(0)
    - .bucket(BUCKET).inspector(inspector).filesystem(fs);
    + .bucket(BUCKET).inspector(inspector).filesystem(fs).finalDestination(root);
    + if(!use130Format) {
    + options.statementId(-1);
    + }
          RecordUpdater ru = of.getRecordUpdater(root, options);
          String[] values= new String[]{"1", "2", "3", "4", "5"};
          for(int i=0; i < values.length; ++i) {
    @@ -1110,8 +1131,8 @@ public class TestOrcRawRecordMerger {
          splits = inf.getSplits(job, 1);
          assertEquals(2, splits.length);
          rr = inf.getRecordReader(splits[0], job, Reporter.NULL);
    - Path sideFile = new Path(root +
    - "/delta_0000010_0000019/bucket_00001_flush_length");
    + Path sideFile = new Path(root + "/" + (use130Format ? AcidUtils.deltaSubdir(10,19,0) :
    + AcidUtils.deltaSubdir(10,19)) + "/bucket_00001_flush_length");
          assertEquals(true, fs.exists(sideFile));
          assertEquals(24, fs.getFileStatus(sideFile).getLen());


    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
    index 22bd4b9..22030b4 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java
    @@ -97,7 +97,8 @@ public class TestOrcRecordUpdater {
              .minimumTransactionId(10)
              .maximumTransactionId(19)
              .inspector(inspector)
    - .reporter(Reporter.NULL);
    + .reporter(Reporter.NULL)
    + .finalDestination(root);
          RecordUpdater updater = new OrcRecordUpdater(root, options);
          updater.insert(11, new MyRow("first"));
          updater.insert(11, new MyRow("second"));
    @@ -197,7 +198,8 @@ public class TestOrcRecordUpdater {
              .maximumTransactionId(100)
              .inspector(inspector)
              .reporter(Reporter.NULL)
    - .recordIdColumn(1);
    + .recordIdColumn(1)
    + .finalDestination(root);
          RecordUpdater updater = new OrcRecordUpdater(root, options);
          updater.update(100, new MyRow("update", 30, 10, bucket));
          updater.delete(100, new MyRow("", 60, 40, bucket));

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
    index 671e122..21adc9d 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java
    @@ -241,7 +241,7 @@ public abstract class CompactorTest {
          return sd;
        }

    - // I can't do this with @Before because I want to be able to control when the thead starts
    + // I can't do this with @Before because I want to be able to control when the thread starts
        private void startThread(char type, boolean stopAfterOne) throws Exception {
          startThread(type, stopAfterOne, new AtomicBoolean());
        }
    @@ -284,7 +284,7 @@ public abstract class CompactorTest {
          switch (type) {
            case BASE: filename = "base_" + maxTxn; break;
            case LENGTH_FILE: // Fall through to delta
    - case DELTA: filename = "delta_" + minTxn + "_" + maxTxn; break;
    + case DELTA: filename = makeDeltaDirName(minTxn, maxTxn); break;
            case LEGACY: break; // handled below
          }

    @@ -508,5 +508,21 @@ public abstract class CompactorTest {
          }
        }

    + /**
    + * in Hive 1.3.0 delta file names changed to delta_xxxx_yyyy_zzzz; prior to that
    + * the name was delta_xxxx_yyyy. We want to run compaction tests such that both formats
    + * are used since new (1.3) code has to be able to read old files.
    + */
    + abstract boolean useHive130DeltaDirName();

    + String makeDeltaDirName(long minTxnId, long maxTxnId) {
    + return useHive130DeltaDirName() ?
    + AcidUtils.deltaSubdir(minTxnId, maxTxnId, 0) : AcidUtils.deltaSubdir(minTxnId, maxTxnId);
    + }
    + /**
    + * delta dir name after compaction
    + */
    + String makeDeltaDirNameCompacted(long minTxnId, long maxTxnId) {
    + return AcidUtils.deltaSubdir(minTxnId, maxTxnId);
    + }
      }
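
    For context, the two delta-directory layouts these compaction tests toggle between look roughly as sketched below. This is a minimal illustration, not AcidUtils itself: the 7-digit transaction padding matches paths seen elsewhere in the patch (e.g. delta_0000010_0000019), while the 4-digit statement suffix is an assumption made for the example.

      // Minimal sketch only; in Hive the real names come from AcidUtils.deltaSubdir.
      public class DeltaDirNames {

        // Pre-1.3.0 layout: delta_<minTxn>_<maxTxn>
        static String preHive130(long minTxn, long maxTxn) {
          return String.format("delta_%07d_%07d", minTxn, maxTxn);
        }

        // 1.3.0 layout appends a statement id: delta_<minTxn>_<maxTxn>_<stmtId>
        // (the 4-digit statement padding is an assumption for illustration)
        static String hive130(long minTxn, long maxTxn, int statementId) {
          return String.format("delta_%07d_%07d_%04d", minTxn, maxTxn, statementId);
        }

        public static void main(String[] args) {
          System.out.println(preHive130(21, 24));  // delta_0000021_0000024
          System.out.println(hive130(21, 24, 0));  // delta_0000021_0000024_0000
        }
      }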

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
    index ffdbb9a..0db732c 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java
    @@ -139,7 +139,7 @@ public class TestCleaner extends CompactorTest {
          boolean sawBase = false, sawDelta = false;
          for (Path p : paths) {
            if (p.getName().equals("base_20")) sawBase = true;
    - else if (p.getName().equals("delta_21_24")) sawDelta = true;
    + else if (p.getName().equals(makeDeltaDirName(21, 24))) sawDelta = true;
            else Assert.fail("Unexpected file " + p.getName());
          }
          Assert.assertTrue(sawBase);
    @@ -177,7 +177,7 @@ public class TestCleaner extends CompactorTest {
          boolean sawBase = false, sawDelta = false;
          for (Path path : paths) {
            if (path.getName().equals("base_20")) sawBase = true;
    - else if (path.getName().equals("delta_21_24")) sawDelta = true;
    + else if (path.getName().equals(makeDeltaDirNameCompacted(21, 24))) sawDelta = true;
            else Assert.fail("Unexpected file " + path.getName());
          }
          Assert.assertTrue(sawBase);
    @@ -480,4 +480,8 @@ public class TestCleaner extends CompactorTest {
          ShowCompactResponse rsp = txnHandler.showCompact(new ShowCompactRequest());
          Assert.assertEquals(0, rsp.getCompactsSize());
        }
    + @Override
    + boolean useHive130DeltaDirName() {
    + return false;
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java
    new file mode 100644
    index 0000000..c637dd1
    --- /dev/null
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner2.java
    @@ -0,0 +1,14 @@
    +package org.apache.hadoop.hive.ql.txn.compactor;
    +
    +/**
    + * Same as TestCleaner but tests delta file names in Hive 1.3.0 format
    + */
    +public class TestCleaner2 extends TestCleaner {
    + public TestCleaner2() throws Exception {
    + super();
    + }
    + @Override
    + boolean useHive130DeltaDirName() {
    + return false;
    + }
    +}

    http://git-wip-us.apache.org/repos/asf/hive/blob/66feedc5/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
    index 00b13de..0b0b1da 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestInitiator.java
    @@ -713,5 +713,9 @@ public class TestInitiator extends CompactorTest {
          List<ShowCompactResponseElement> compacts = rsp.getCompacts();
          Assert.assertEquals(0, compacts.size());
        }
    + @Override
    + boolean useHive130DeltaDirName() {
    + return false;
    + }

      }
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11252 : CBO (Calcite Return Path): DUMMY project in plan (Jesus Camacho Rodriguez via Ashutosh Chauhan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/999e0e36
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/999e0e36
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/999e0e36

    Branch: refs/heads/spark
    Commit: 999e0e3616525d77cf46c5865f9981b5a6b5609a
    Parents: 90a2cf9
    Author: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Authored: Tue Jul 14 08:22:00 2015 +0700
    Committer: Ashutosh Chauhan <hashutosh@apache.org>
    Committed: Tue Jul 14 11:18:33 2015 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/ql/parse/CalcitePlanner.java | 64 +++++++++-----------
      1 file changed, 30 insertions(+), 34 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/999e0e36/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    index 84bb951..1ea236b 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    @@ -863,38 +863,20 @@ public class CalcitePlanner extends SemanticAnalyzer {
            calciteOptimizedPlan = hepPlanner.findBestExp();

            // 4. Run rule to try to remove projects on top of join operators
    - hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
    - hepPgmBldr.addRuleInstance(HiveJoinCommuteRule.INSTANCE);
    - hepPlanner = new HepPlanner(hepPgmBldr.build());
    - hepPlanner.registerMetadataProviders(list);
    - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
    - hepPlanner.setRoot(calciteOptimizedPlan);
    - calciteOptimizedPlan = hepPlanner.findBestExp();
    + calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
    + HepMatchOrder.BOTTOM_UP, HiveJoinCommuteRule.INSTANCE);

            // 5. Run rule to fix windowing issue when it is done over
            // aggregation columns (HIVE-10627)
    - hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
    - hepPgmBldr.addRuleInstance(HiveWindowingFixRule.INSTANCE);
    - hepPlanner = new HepPlanner(hepPgmBldr.build());
    - hepPlanner.registerMetadataProviders(list);
    - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
    - hepPlanner.setRoot(calciteOptimizedPlan);
    - calciteOptimizedPlan = hepPlanner.findBestExp();
    + calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
    + HepMatchOrder.BOTTOM_UP, HiveWindowingFixRule.INSTANCE);

            // 6. Run rules to aid in translation from Calcite tree to Hive tree
            if (HiveConf.getBoolVar(conf, ConfVars.HIVE_CBO_RETPATH_HIVEOP)) {
              // 6.1. Merge join into multijoin operators (if possible)
    - hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
    - hepPgmBldr.addRuleInstance(HiveJoinToMultiJoinRule.INSTANCE);
    - hepPgmBldr = hepPgmBldr.addRuleCollection(ImmutableList.of(
    - HiveJoinProjectTransposeRule.BOTH_PROJECT,
    - HiveJoinToMultiJoinRule.INSTANCE,
    - HiveProjectMergeRule.INSTANCE));
    - hepPlanner = new HepPlanner(hepPgmBldr.build());
    - hepPlanner.registerMetadataProviders(list);
    - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
    - hepPlanner.setRoot(calciteOptimizedPlan);
    - calciteOptimizedPlan = hepPlanner.findBestExp();
    + calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, true, mdProvider.getMetadataProvider(),
    + HepMatchOrder.BOTTOM_UP, HiveJoinProjectTransposeRule.BOTH_PROJECT,
    + HiveJoinToMultiJoinRule.INSTANCE, HiveProjectMergeRule.INSTANCE);
              // The previous rules can pull up projections through join operators,
              // thus we run the field trimmer again to push them back down
              HiveRelFieldTrimmer fieldTrimmer = new HiveRelFieldTrimmer(null, HiveProject.DEFAULT_PROJECT_FACTORY,
    @@ -902,16 +884,14 @@ public class CalcitePlanner extends SemanticAnalyzer {
                  HiveSemiJoin.HIVE_SEMIJOIN_FACTORY, HiveSort.HIVE_SORT_REL_FACTORY,
                  HiveAggregate.HIVE_AGGR_REL_FACTORY, HiveUnion.UNION_REL_FACTORY);
              calciteOptimizedPlan = fieldTrimmer.trim(calciteOptimizedPlan);
    + calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
    + HepMatchOrder.BOTTOM_UP, ProjectRemoveRule.INSTANCE,
    + new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));

              // 6.2. Introduce exchange operators below join/multijoin operators
    - hepPgmBldr = new HepProgramBuilder().addMatchOrder(HepMatchOrder.BOTTOM_UP);
    - hepPgmBldr.addRuleInstance(HiveInsertExchange4JoinRule.EXCHANGE_BELOW_JOIN);
    - hepPgmBldr.addRuleInstance(HiveInsertExchange4JoinRule.EXCHANGE_BELOW_MULTIJOIN);
    - hepPlanner = new HepPlanner(hepPgmBldr.build());
    - hepPlanner.registerMetadataProviders(list);
    - cluster.setMetadataProvider(new CachingRelMetadataProvider(chainedProvider, hepPlanner));
    - hepPlanner.setRoot(calciteOptimizedPlan);
    - calciteOptimizedPlan = hepPlanner.findBestExp();
    + calciteOptimizedPlan = hepPlan(calciteOptimizedPlan, false, mdProvider.getMetadataProvider(),
    + HepMatchOrder.BOTTOM_UP, HiveInsertExchange4JoinRule.EXCHANGE_BELOW_JOIN,
    + HiveInsertExchange4JoinRule.EXCHANGE_BELOW_MULTIJOIN);
            }

            if (LOG.isDebugEnabled() && !conf.getBoolVar(ConfVars.HIVE_IN_TEST)) {
    @@ -1006,11 +986,27 @@ public class CalcitePlanner extends SemanticAnalyzer {
           */
          private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges,
              RelMetadataProvider mdProvider, RelOptRule... rules) {
    + return hepPlan(basePlan, followPlanChanges, mdProvider,
    + HepMatchOrder.TOP_DOWN, rules);
    + }
    +
    + /**
    + * Run the HEP Planner with the given rule set.
    + *
    + * @param basePlan
    + * @param followPlanChanges
    + * @param mdProvider
    + * @param order
    + * @param rules
    + * @return optimized RelNode
    + */
    + private RelNode hepPlan(RelNode basePlan, boolean followPlanChanges, RelMetadataProvider mdProvider,
    + HepMatchOrder order, RelOptRule... rules) {

            RelNode optimizedRelNode = basePlan;
            HepProgramBuilder programBuilder = new HepProgramBuilder();
            if (followPlanChanges) {
    - programBuilder.addMatchOrder(HepMatchOrder.TOP_DOWN);
    + programBuilder.addMatchOrder(order);
              programBuilder = programBuilder.addRuleCollection(ImmutableList.copyOf(rules));
            } else {
              // TODO: Should this be also TOP_DOWN?
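
    The net effect of this change is that several near-identical HepPlanner setup blocks collapse into calls to a single varargs hepPlan(...) helper that also takes the match order. A self-contained analogue of that pattern, using toy names rather than Calcite's classes:

      import java.util.function.UnaryOperator;

      // Toy analogue of consolidating repeated "build planner, add rules, run" blocks
      // into one varargs helper; none of these names are Calcite's.
      public class RulePipeline {

        enum MatchOrder { TOP_DOWN, BOTTOM_UP }

        @SafeVarargs
        static String runRules(String plan, MatchOrder order, UnaryOperator<String>... rules) {
          String result = plan;
          if (order == MatchOrder.TOP_DOWN) {
            for (UnaryOperator<String> rule : rules) {
              result = rule.apply(result);
            }
          } else { // BOTTOM_UP: apply in reverse, purely to show the order parameter mattering
            for (int i = rules.length - 1; i >= 0; i--) {
              result = rules[i].apply(result);
            }
          }
          return result;
        }

        public static void main(String[] args) {
          // Each call site shrinks to one line per rule set, as in the patch above.
          String plan = runRules("scan", MatchOrder.BOTTOM_UP,
              p -> p + " -> join", p -> p + " -> project");
          System.out.println(plan);
        }
      }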
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11231 : CBO: Calcite Operator To Hive Operator (Calcite Return Path): make the output of ba_table_union.q more stable (Pengcheng Xiong via Ashutosh Chauhan)

    Signed-off-by: Ashutosh Chauhan <hashutosh@apache.org>


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/65e9fcf0
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/65e9fcf0
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/65e9fcf0

    Branch: refs/heads/spark
    Commit: 65e9fcf059f5e274c4b7871e7bc4034db98e8591
    Parents: 66feedc
    Author: Pengcheng Xiong <pxiong@hortonworks.com>
    Authored: Mon Jul 20 12:16:00 2015 -0700
    Committer: Ashutosh Chauhan <hashutosh@apache.org>
    Committed: Mon Jul 13 09:32:29 2015 -0700

    ----------------------------------------------------------------------
      ql/src/test/queries/clientpositive/ba_table_union.q | 2 +-
      .../results/clientpositive/ba_table_union.q.out | 16 ++++++++--------
      2 files changed, 9 insertions(+), 9 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/65e9fcf0/ql/src/test/queries/clientpositive/ba_table_union.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/ba_table_union.q b/ql/src/test/queries/clientpositive/ba_table_union.q
    index 9804659..bf35d0e 100644
    --- a/ql/src/test/queries/clientpositive/ba_table_union.q
    +++ b/ql/src/test/queries/clientpositive/ba_table_union.q
    @@ -7,7 +7,7 @@ describe extended ba_test;

      from src insert overwrite table ba_test select cast (src.key as binary), cast (src.value as binary);

    -select * from ( select key from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10;
    +select * from ( select key from src where key < 50 union all select cast(ba_key as string) as key from ba_test order by key limit 50) unioned order by key limit 10;

      drop table ba_test;


    http://git-wip-us.apache.org/repos/asf/hive/blob/65e9fcf0/ql/src/test/results/clientpositive/ba_table_union.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/ba_table_union.q.out b/ql/src/test/results/clientpositive/ba_table_union.q.out
    index 639ffda..53f16b6 100644
    --- a/ql/src/test/results/clientpositive/ba_table_union.q.out
    +++ b/ql/src/test/results/clientpositive/ba_table_union.q.out
    @@ -32,12 +32,12 @@ POSTHOOK: Input: default@src
      POSTHOOK: Output: default@ba_test
      POSTHOOK: Lineage: ba_test.ba_key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ]
      POSTHOOK: Lineage: ba_test.ba_val EXPRESSION [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    -PREHOOK: query: select * from ( select key from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10
    +PREHOOK: query: select * from ( select key from src where key < 50 union all select cast(ba_key as string) as key from ba_test order by key limit 50) unioned order by key limit 10
      PREHOOK: type: QUERY
      PREHOOK: Input: default@ba_test
      PREHOOK: Input: default@src
      #### A masked pattern was here ####
    -POSTHOOK: query: select * from ( select key from src where key < 50 union all select cast(ba_key as string) as key from ba_test limit 50) unioned order by key limit 10
    +POSTHOOK: query: select * from ( select key from src where key < 50 union all select cast(ba_key as string) as key from ba_test order by key limit 50) unioned order by key limit 10
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@ba_test
      POSTHOOK: Input: default@src
    @@ -45,13 +45,13 @@ POSTHOOK: Input: default@src
      0
      0
      0
    +0
    +0
    +0
    +10
      10
    -11
    -12
    -12
    -15
    -15
    -153
    +100
    +100
      PREHOOK: query: drop table ba_test
      PREHOOK: type: DROPTABLE
      PREHOOK: Input: default@ba_test
  • Sunchao at Jul 20, 2015 at 8:12 pm
HIVE-11243: Changing log level in Utilities.getBaseWork (Nemon Lou, reviewed by Ferdinand Xu)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e2ee458d
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e2ee458d
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e2ee458d

    Branch: refs/heads/spark
    Commit: e2ee458d8ef417785f1e9ebfca303a9d15fee8a8
    Parents: e61a1a9
    Author: Ferdinand Xu <cheng.a.xu@intel.com>
    Authored: Thu Jul 16 04:06:05 2015 -0400
    Committer: Ferdinand Xu <cheng.a.xu@intel.com>
    Committed: Thu Jul 16 04:06:05 2015 -0400

    ----------------------------------------------------------------------
      ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java | 2 +-
      1 file changed, 1 insertion(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/e2ee458d/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    index afecb1e..d8e463d 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
    @@ -453,7 +453,7 @@ public final class Utilities {
            return gWork;
          } catch (FileNotFoundException fnf) {
            // happens. e.g.: no reduce work.
    - LOG.info("File not found: " + fnf.getMessage());
    + LOG.debug("File not found: " + fnf.getMessage());
            LOG.info("No plan file found: "+path);
            return null;
          } catch (Exception e) {
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11024: Error inserting a date value via parameter marker (PreparedStatement.setDate) (Yongzhi Chen, reviewed by Sergio Pena)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/e74dc320
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/e74dc320
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/e74dc320

    Branch: refs/heads/spark
    Commit: e74dc320ed4c2cae0ab34cf89fb695c1a5f2e31f
    Parents: e2ee458
    Author: Sergio Pena <sergio.pena@cloudera.com>
    Authored: Thu Jul 16 07:28:16 2015 -0500
    Committer: Sergio Pena <sergio.pena@cloudera.com>
    Committed: Thu Jul 16 07:28:16 2015 -0500

    ----------------------------------------------------------------------
      .../java/org/apache/hive/jdbc/TestJdbcDriver2.java | 16 ++++++++++++++++
      .../org/apache/hive/jdbc/HivePreparedStatement.java | 2 +-
      2 files changed, 17 insertions(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/e74dc320/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    ----------------------------------------------------------------------
    diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    index e4b9cd7..b2dd2ab 100644
    --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java
    @@ -2366,4 +2366,20 @@ public void testParseUrlHttpMode() throws SQLException, JdbcUriParseException,
            assertTrue(accumulatedLogs.contains(expectedLog));
          }
        }
    + @Test
    + public void testPrepareSetDate() throws Exception {
    + try {
    + String sql = "select * from " + dataTypeTableName + " where c20 = ?";
    + PreparedStatement ps = con.prepareStatement(sql);
    + java.sql.Date dtValue = java.sql.Date.valueOf("2013-01-01");
    + ps.setDate(1, dtValue);
    + ResultSet res = ps.executeQuery();
    + assertTrue(res.next());
    + assertEquals("2013-01-01", res.getString(20));
    + ps.close();
    + } catch (Exception e) {
    + e.printStackTrace();
    + fail(e.toString());
    + }
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/e74dc320/jdbc/src/java/org/apache/hive/jdbc/HivePreparedStatement.java
    ----------------------------------------------------------------------
    diff --git a/jdbc/src/java/org/apache/hive/jdbc/HivePreparedStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HivePreparedStatement.java
    index 2625155..7687537 100644
    --- a/jdbc/src/java/org/apache/hive/jdbc/HivePreparedStatement.java
    +++ b/jdbc/src/java/org/apache/hive/jdbc/HivePreparedStatement.java
    @@ -436,7 +436,7 @@ public class HivePreparedStatement extends HiveStatement implements PreparedStat
         */

        public void setDate(int parameterIndex, Date x) throws SQLException {
    - this.parameters.put(parameterIndex, x.toString());
    + this.parameters.put(parameterIndex, "'" + x.toString() + "'");
        }

        /*
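
    A plausible reading of the one-character fix above: the driver substitutes parameters into the SQL text, so an unquoted java.sql.Date would reach the parser as the bare token 2013-01-01 (which can be read as an arithmetic expression) rather than a date string. A tiny illustration of the difference in the generated text; the table and column names are made up and this is not the driver's actual substitution code:

      public class DateParamQuoting {
        public static void main(String[] args) {
          java.sql.Date d = java.sql.Date.valueOf("2013-01-01");
          // Before the fix: the literal lands in the SQL unquoted.
          String unquoted = "select * from t where c20 = " + d;
          // After the fix: the literal is wrapped in single quotes, a proper string/date literal.
          String quoted = "select * from t where c20 = '" + d + "'";
          System.out.println(unquoted);  // ... where c20 = 2013-01-01
          System.out.println(quoted);    // ... where c20 = '2013-01-01'
        }
      }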
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11251: CBO (Calcite Return Path): Extending ExprNodeConverter to consider additional types (Jesus Camacho Rodriguez, reviewed by Ashutosh Chauhan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8662d9da
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8662d9da
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8662d9da

    Branch: refs/heads/spark
    Commit: 8662d9dae3da1cdbec3ac8c2c4f7d9f12ae5f1f0
    Parents: af4aeab
    Author: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Authored: Wed Jul 15 07:06:14 2015 +0100
    Committer: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Committed: Wed Jul 15 18:31:59 2015 +0100

    ----------------------------------------------------------------------
      .../calcite/translator/ExprNodeConverter.java | 17 +++++++++++++++++
      1 file changed, 17 insertions(+)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/8662d9da/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    index bcce74a..4f0db03 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    @@ -17,6 +17,7 @@
       */
      package org.apache.hadoop.hive.ql.optimizer.calcite.translator;

    +import java.math.BigDecimal;
      import java.sql.Date;
      import java.sql.Timestamp;
      import java.util.ArrayList;
    @@ -43,6 +44,8 @@ import org.apache.calcite.sql.type.SqlTypeUtil;
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.hive.common.type.HiveChar;
    +import org.apache.hadoop.hive.common.type.HiveIntervalDayTime;
    +import org.apache.hadoop.hive.common.type.HiveIntervalYearMonth;
      import org.apache.hadoop.hive.common.type.HiveVarchar;
      import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
      import org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.RexVisitor;
    @@ -199,6 +202,7 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
            return new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, Long.valueOf(((Number) literal
                .getValue3()).longValue()));
          case FLOAT:
    + case REAL:
            return new ExprNodeConstantDesc(TypeInfoFactory.floatTypeInfo,
                Float.valueOf(((Number) literal.getValue3()).floatValue()));
          case DOUBLE:
    @@ -207,6 +211,7 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
          case DATE:
            return new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo,
              new Date(((Calendar)literal.getValue()).getTimeInMillis()));
    + case TIME:
          case TIMESTAMP: {
            Object value = literal.getValue3();
            if (value instanceof Long) {
    @@ -225,6 +230,18 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
          case CHAR:
            return new ExprNodeConstantDesc(TypeInfoFactory.getCharTypeInfo(lType.getPrecision()),
                new HiveChar((String) literal.getValue3(), lType.getPrecision()));
    + case INTERVAL_YEAR_MONTH: {
    + BigDecimal monthsBd = (BigDecimal) literal.getValue();
    + return new ExprNodeConstantDesc(TypeInfoFactory.intervalYearMonthTypeInfo,
    + new HiveIntervalYearMonth(monthsBd.intValue()));
    + }
    + case INTERVAL_DAY_TIME: {
    + BigDecimal millisBd = (BigDecimal) literal.getValue();
    + // Calcite literal is in millis, we need to convert to seconds
    + BigDecimal secsBd = millisBd.divide(BigDecimal.valueOf(1000));
    + return new ExprNodeConstantDesc(TypeInfoFactory.intervalDayTimeTypeInfo,
    + new HiveIntervalDayTime(secsBd));
    + }
          case OTHER:
          default:
            return new ExprNodeConstantDesc(TypeInfoFactory.voidTypeInfo, literal.getValue3());
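
    The INTERVAL_DAY_TIME branch above divides by 1000 because Calcite hands the literal over as a millisecond count while the HiveIntervalDayTime constructor used here takes seconds, as the in-line comment notes. A small worked example of that conversion, independent of the Hive classes:

      import java.math.BigDecimal;

      public class IntervalMillisToSeconds {
        public static void main(String[] args) {
          // 1 day, 1 hour, 1 minute, 1.5 seconds expressed in milliseconds
          BigDecimal millis = BigDecimal.valueOf(90061500L);
          // Dividing by 1000 keeps the fractional seconds; the quotient terminates,
          // so no explicit rounding mode is needed.
          BigDecimal seconds = millis.divide(BigDecimal.valueOf(1000));
          System.out.println(seconds);  // 90061.5
        }
      }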
  • Sunchao at Jul 20, 2015 at 8:12 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
    index 4b84eca..12e1fbe 100644
    --- a/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
    +++ b/ql/src/test/results/clientpositive/spark/stats_counter_partitioned.q.out
    @@ -66,8 +66,6 @@ Partition Value: [2008, 11]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -108,8 +106,6 @@ Partition Value: [2008, 12]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -190,8 +186,6 @@ Partition Value: [10, 11]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -232,8 +226,6 @@ Partition Value: [10, 12]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -323,8 +315,6 @@ Partition Value: [1997]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -363,8 +353,6 @@ Partition Value: [1994]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -403,8 +391,6 @@ Partition Value: [1998]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -443,8 +429,6 @@ Partition Value: [1996]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
    index 80c3092..2559492 100644
    --- a/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
    +++ b/ql/src/test/results/clientpositive/spark/stats_noscan_1.q.out
    @@ -100,8 +100,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -142,8 +140,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -184,8 +180,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -226,8 +220,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -266,7 +258,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -381,8 +372,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -423,8 +412,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -465,8 +452,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -507,8 +492,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
    index 105c222..8136c39 100644
    --- a/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
    +++ b/ql/src/test/results/clientpositive/spark/stats_noscan_2.q.out
    @@ -47,7 +47,6 @@ a int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -91,7 +90,6 @@ a int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -231,8 +229,6 @@ Partition Value: [2008-01-01]
      Database: default
      Table: anaylyze_external
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -284,8 +280,6 @@ Partition Value: [2008-01-01]
      Database: default
      Table: anaylyze_external
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
    index cceceef..cb0920e 100644
    --- a/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
    +++ b/ql/src/test/results/clientpositive/spark/stats_only_null.q.out
    @@ -229,8 +229,6 @@ Partition Value: [2010]
      Database: default
      Table: stats_null_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -272,8 +270,6 @@ Partition Value: [2011]
      Database: default
      Table: stats_null_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
    index adcf150..eb0145b 100644
    --- a/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
    +++ b/ql/src/test/results/clientpositive/spark/stats_partscan_1_23.q.out
    @@ -75,8 +75,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -150,8 +148,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -192,8 +188,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/statsfs.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/statsfs.q.out b/ql/src/test/results/clientpositive/spark/statsfs.q.out
    index b0bca41..2735f5f 100644
    --- a/ql/src/test/results/clientpositive/spark/statsfs.q.out
    +++ b/ql/src/test/results/clientpositive/spark/statsfs.q.out
    @@ -65,8 +65,6 @@ Partition Value: [2010]
      Database: default
      Table: t1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -106,8 +104,6 @@ Partition Value: [2011]
      Database: default
      Table: t1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -187,8 +183,6 @@ Partition Value: [2010]
      Database: default
      Table: t1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -228,8 +222,6 @@ Partition Value: [2011]
      Database: default
      Table: t1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -298,7 +290,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -364,7 +355,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -457,8 +447,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: t1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -499,8 +487,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: t1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    index c7616b4..a4b5836 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_1.q.out
    @@ -193,7 +193,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    index 586b88b..d98b388 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_10.q.out
    @@ -255,7 +255,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
    index 9ddf606..1e7d5cf 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_11.q.out
    @@ -245,7 +245,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
    index fdaa941..e57626f 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_12.q.out
    @@ -245,7 +245,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
    index 995d180..bb294cd 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_13.q.out
    @@ -271,7 +271,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
    index e23ecf4..3bb9194 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_14.q.out
    @@ -247,7 +247,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    index 62dd62c..354a48f 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_15.q.out
    @@ -217,7 +217,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    index 42aa20a..57059dc 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_16.q.out
    @@ -257,7 +257,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
    index c6fdfa5..c8047a9 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_17.q.out
    @@ -168,7 +168,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    index d732571..a4d1793 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_18.q.out
    @@ -227,7 +227,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    index 982dd67..c6e2070 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_19.q.out
    @@ -197,7 +197,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    index 3993ac5..d886433 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_2.q.out
    @@ -199,7 +199,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    index 13d3b5c..4404b87 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_20.q.out
    @@ -203,7 +203,6 @@ key string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    index f5c7636..ccefacf 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_21.q.out
    @@ -187,7 +187,6 @@ key string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
    index d00fb89..e53e963 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_22.q.out
    @@ -207,7 +207,6 @@ values2 bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
    index 3fa6ae0..adb0c62 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_23.q.out
    @@ -227,7 +227,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    index c0ce83f..9237316 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_24.q.out
    @@ -199,7 +199,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    index 8cd8c8d..6e96186 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_25.q.out
    @@ -218,8 +218,6 @@ Partition Value: [2004]
      Database: default
      Table: outputtbl1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 4
    @@ -429,8 +427,6 @@ Partition Value: [2008-04-08]
      Database: default
      Table: outputtbl2
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 2
    @@ -624,8 +620,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: outputtbl3
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 2

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
    index 2126c1d..565b834 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_3.q.out
    @@ -189,7 +189,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    index 7dd8e3e..a5730c1 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_4.q.out
    @@ -243,7 +243,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    index fe1fd8b..908298d 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_5.q.out
    @@ -251,7 +251,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    index 530be3f..5e88d10 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_7.q.out
    @@ -197,7 +197,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    index 06adb05..b33767c 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_8.q.out
    @@ -203,7 +203,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
    index 7857999..e837bd7 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_9.q.out
    @@ -259,7 +259,6 @@ values bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats1.q.out b/ql/src/test/results/clientpositive/stats1.q.out
    index 6f560d3..ac34bbb 100644
    --- a/ql/src/test/results/clientpositive/stats1.q.out
    +++ b/ql/src/test/results/clientpositive/stats1.q.out
    @@ -178,7 +178,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -228,7 +227,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats10.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats10.q.out b/ql/src/test/results/clientpositive/stats10.q.out
    index dc8aa7f..7824cbd 100644
    --- a/ql/src/test/results/clientpositive/stats10.q.out
    +++ b/ql/src/test/results/clientpositive/stats10.q.out
    @@ -413,8 +413,6 @@ Partition Value: [1]
      Database: default
      Table: bucket3_1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 2
    @@ -454,8 +452,6 @@ Partition Value: [2]
      Database: default
      Table: bucket3_1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 2
    @@ -493,7 +489,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats11.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats11.q.out b/ql/src/test/results/clientpositive/stats11.q.out
    index e51f049..4ed235f 100644
    --- a/ql/src/test/results/clientpositive/stats11.q.out
    +++ b/ql/src/test/results/clientpositive/stats11.q.out
    @@ -86,8 +86,6 @@ Partition Value: [2008-04-08]
      Database: default
      Table: srcbucket_mapjoin_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -135,8 +133,6 @@ Partition Value: [2008-04-08]
      Database: default
      Table: srcbucket_mapjoin_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 2
    @@ -184,8 +180,6 @@ Partition Value: [2008-04-08]
      Database: default
      Table: srcbucket_mapjoin_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 3
    @@ -233,8 +227,6 @@ Partition Value: [2008-04-08]
      Database: default
      Table: srcbucket_mapjoin_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats12.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats12.q.out b/ql/src/test/results/clientpositive/stats12.q.out
    index d5ca59b..c6e7c68 100644
    --- a/ql/src/test/results/clientpositive/stats12.q.out
    +++ b/ql/src/test/results/clientpositive/stats12.q.out
    @@ -205,7 +205,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -244,8 +243,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -286,8 +283,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -328,8 +323,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -370,8 +363,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats13.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats13.q.out b/ql/src/test/results/clientpositive/stats13.q.out
    index e34f0b7..7415728 100644
    --- a/ql/src/test/results/clientpositive/stats13.q.out
    +++ b/ql/src/test/results/clientpositive/stats13.q.out
    @@ -155,7 +155,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -194,8 +193,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -236,8 +233,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -278,8 +273,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -320,8 +313,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -368,7 +359,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats14.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats14.q.out b/ql/src/test/results/clientpositive/stats14.q.out
    index f12b136..f34720d 100644
    --- a/ql/src/test/results/clientpositive/stats14.q.out
    +++ b/ql/src/test/results/clientpositive/stats14.q.out
    @@ -38,7 +38,6 @@ value string default
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -142,7 +141,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -181,8 +179,6 @@ Partition Value: [2010-04-08, 11]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -223,8 +219,6 @@ Partition Value: [2010-04-08, 12]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -283,7 +277,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats15.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats15.q.out b/ql/src/test/results/clientpositive/stats15.q.out
    index a60dee2..aad2e3a 100644
    --- a/ql/src/test/results/clientpositive/stats15.q.out
    +++ b/ql/src/test/results/clientpositive/stats15.q.out
    @@ -38,7 +38,6 @@ value string default
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -142,7 +141,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -181,8 +179,6 @@ Partition Value: [2010-04-08, 11]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -223,8 +219,6 @@ Partition Value: [2010-04-08, 12]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -283,7 +277,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats16.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats16.q.out b/ql/src/test/results/clientpositive/stats16.q.out
    index 3f0f2ea..2e3cadb 100644
    --- a/ql/src/test/results/clientpositive/stats16.q.out
    +++ b/ql/src/test/results/clientpositive/stats16.q.out
    @@ -24,7 +24,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -73,7 +72,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats18.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats18.q.out b/ql/src/test/results/clientpositive/stats18.q.out
    index a061846..a7d6ab8 100644
    --- a/ql/src/test/results/clientpositive/stats18.q.out
    +++ b/ql/src/test/results/clientpositive/stats18.q.out
    @@ -44,8 +44,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -94,8 +92,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 2

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats19.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats19.q.out b/ql/src/test/results/clientpositive/stats19.q.out
    index 499a8bd..ea56f3a 100644
    --- a/ql/src/test/results/clientpositive/stats19.q.out
    +++ b/ql/src/test/results/clientpositive/stats19.q.out
    @@ -98,8 +98,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -152,8 +150,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -206,8 +202,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -312,8 +306,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -366,8 +358,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -420,8 +410,6 @@ Partition Value: [2010-04-08, 13]
      Database: default
      Table: stats_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats2.q.out b/ql/src/test/results/clientpositive/stats2.q.out
    index 5e305d3..ac1d5cb 100644
    --- a/ql/src/test/results/clientpositive/stats2.q.out
    +++ b/ql/src/test/results/clientpositive/stats2.q.out
    @@ -96,7 +96,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -176,7 +175,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats20.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats20.q.out b/ql/src/test/results/clientpositive/stats20.q.out
    index 4ac7bc5..d7e52b4 100644
    --- a/ql/src/test/results/clientpositive/stats20.q.out
    +++ b/ql/src/test/results/clientpositive/stats20.q.out
    @@ -39,7 +39,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -89,7 +88,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats3.q.out b/ql/src/test/results/clientpositive/stats3.q.out
    index dd3a95b..2afb76e 100644
    --- a/ql/src/test/results/clientpositive/stats3.q.out
    +++ b/ql/src/test/results/clientpositive/stats3.q.out
    @@ -82,7 +82,6 @@ col1 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -227,7 +226,6 @@ pcol2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats4.q.out b/ql/src/test/results/clientpositive/stats4.q.out
    index 39d5413..9ced932 100644
    --- a/ql/src/test/results/clientpositive/stats4.q.out
    +++ b/ql/src/test/results/clientpositive/stats4.q.out
    @@ -2308,8 +2308,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: nzhang_part1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -2350,8 +2348,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: nzhang_part1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -2392,8 +2388,6 @@ Partition Value: [2008-12-31, 11]
      Database: default
      Table: nzhang_part2
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -2434,8 +2428,6 @@ Partition Value: [2008-12-31, 12]
      Database: default
      Table: nzhang_part2
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -2474,7 +2466,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -2511,7 +2502,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats5.q.out b/ql/src/test/results/clientpositive/stats5.q.out
    index 484e834..23d4e6b 100644
    --- a/ql/src/test/results/clientpositive/stats5.q.out
    +++ b/ql/src/test/results/clientpositive/stats5.q.out
    @@ -49,7 +49,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats6.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats6.q.out b/ql/src/test/results/clientpositive/stats6.q.out
    index b4435f2..a387075 100644
    --- a/ql/src/test/results/clientpositive/stats6.q.out
    +++ b/ql/src/test/results/clientpositive/stats6.q.out
    @@ -79,8 +79,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -121,8 +119,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -163,8 +159,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -205,8 +199,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -245,7 +237,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats7.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats7.q.out b/ql/src/test/results/clientpositive/stats7.q.out
    index 03b76e2..7f32764 100644
    --- a/ql/src/test/results/clientpositive/stats7.q.out
    +++ b/ql/src/test/results/clientpositive/stats7.q.out
    @@ -90,8 +90,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -132,8 +130,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -172,7 +168,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats8.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats8.q.out b/ql/src/test/results/clientpositive/stats8.q.out
    index ae632e1..80dd4e8 100644
    --- a/ql/src/test/results/clientpositive/stats8.q.out
    +++ b/ql/src/test/results/clientpositive/stats8.q.out
    @@ -86,8 +86,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -126,7 +124,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -196,8 +193,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -269,8 +264,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -342,8 +335,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -427,8 +418,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -469,8 +458,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -511,8 +498,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -553,8 +538,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -593,7 +576,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats9.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats9.q.out b/ql/src/test/results/clientpositive/stats9.q.out
    index 8563f3b..e7c7743 100644
    --- a/ql/src/test/results/clientpositive/stats9.q.out
    +++ b/ql/src/test/results/clientpositive/stats9.q.out
    @@ -57,7 +57,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_counter.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_counter.q.out b/ql/src/test/results/clientpositive/stats_counter.q.out
    index e2980e8..8b3dcea 100644
    --- a/ql/src/test/results/clientpositive/stats_counter.q.out
    +++ b/ql/src/test/results/clientpositive/stats_counter.q.out
    @@ -32,7 +32,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -80,7 +79,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
    index ab1270c..626dcff 100644
    --- a/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
    +++ b/ql/src/test/results/clientpositive/stats_counter_partitioned.q.out
    @@ -66,8 +66,6 @@ Partition Value: [2008, 11]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -108,8 +106,6 @@ Partition Value: [2008, 12]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -190,8 +186,6 @@ Partition Value: [10, 11]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -232,8 +226,6 @@ Partition Value: [10, 12]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -323,8 +315,6 @@ Partition Value: [1997]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -363,8 +353,6 @@ Partition Value: [1994]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -403,8 +391,6 @@ Partition Value: [1998]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -443,8 +429,6 @@ Partition Value: [1996]
      Database: default
      Table: dummy
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_empty_partition.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_empty_partition.q.out b/ql/src/test/results/clientpositive/stats_empty_partition.q.out
    index 65e0a6f..c13817e 100644
    --- a/ql/src/test/results/clientpositive/stats_empty_partition.q.out
    +++ b/ql/src/test/results/clientpositive/stats_empty_partition.q.out
    @@ -43,8 +43,6 @@ Partition Value: [1]
      Database: default
      Table: tmptable
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_invalidation.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_invalidation.q.out b/ql/src/test/results/clientpositive/stats_invalidation.q.out
    index f8c1731..1bb7dc6 100644
    --- a/ql/src/test/results/clientpositive/stats_invalidation.q.out
    +++ b/ql/src/test/results/clientpositive/stats_invalidation.q.out
    @@ -40,7 +40,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -85,7 +84,6 @@ new_col string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
    index 5a05eb4..63372c5 100644
    --- a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
    +++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.7.out
    @@ -74,8 +74,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: stats_list_bucket
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4
    @@ -151,7 +149,6 @@ c2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
    index 1b26365..8688cee 100644
    --- a/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
    +++ b/ql/src/test/results/clientpositive/stats_list_bucket.q.java1.8.out
    @@ -74,8 +74,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: stats_list_bucket
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4
    @@ -151,7 +149,6 @@ c2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_noscan_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_noscan_1.q.out b/ql/src/test/results/clientpositive/stats_noscan_1.q.out
    index 80c3092..2559492 100644
    --- a/ql/src/test/results/clientpositive/stats_noscan_1.q.out
    +++ b/ql/src/test/results/clientpositive/stats_noscan_1.q.out
    @@ -100,8 +100,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -142,8 +140,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -184,8 +180,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -226,8 +220,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -266,7 +258,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -381,8 +372,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -423,8 +412,6 @@ Partition Value: [2008-04-08, 12]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -465,8 +452,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -507,8 +492,6 @@ Partition Value: [2008-04-09, 12]
      Database: default
      Table: analyze_srcpart_partial
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_noscan_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_noscan_2.q.out b/ql/src/test/results/clientpositive/stats_noscan_2.q.out
    index 105c222..8136c39 100644
    --- a/ql/src/test/results/clientpositive/stats_noscan_2.q.out
    +++ b/ql/src/test/results/clientpositive/stats_noscan_2.q.out
    @@ -47,7 +47,6 @@ a int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -91,7 +90,6 @@ a int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -231,8 +229,6 @@ Partition Value: [2008-01-01]
      Database: default
      Table: anaylyze_external
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -284,8 +280,6 @@ Partition Value: [2008-01-01]
      Database: default
      Table: anaylyze_external
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_only_null.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_only_null.q.out b/ql/src/test/results/clientpositive/stats_only_null.q.out
    index f99b85a..55c5970 100644
    --- a/ql/src/test/results/clientpositive/stats_only_null.q.out
    +++ b/ql/src/test/results/clientpositive/stats_only_null.q.out
    @@ -217,8 +217,6 @@ Partition Value: [2010]
      Database: default
      Table: stats_null_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -260,8 +258,6 @@ Partition Value: [2011]
      Database: default
      Table: stats_null_part
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/stats_partscan_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/stats_partscan_1.q.out b/ql/src/test/results/clientpositive/stats_partscan_1.q.out
    index b15fbc7..e2a3bbd 100644
    --- a/ql/src/test/results/clientpositive/stats_partscan_1.q.out
    +++ b/ql/src/test/results/clientpositive/stats_partscan_1.q.out
    @@ -74,8 +74,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
    @@ -149,8 +147,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -191,8 +187,6 @@ Partition Value: [2008-04-09, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11137. In DateWritable remove use of LazyBinaryUtils.


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7788968c
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7788968c
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7788968c

    Branch: refs/heads/spark
    Commit: 7788968ce7baced7b6cbd575317d7413fc75d143
    Parents: b6f48cb
    Author: Owen O'Malley <omalley@apache.org>
    Authored: Sun Jun 28 17:04:49 2015 -0700
    Committer: Owen O'Malley <omalley@apache.org>
    Committed: Fri Jul 17 09:51:52 2015 -0700

    ----------------------------------------------------------------------
      .../apache/hadoop/hive/serde2/io/DateWritable.java | 14 --------------
      .../hadoop/hive/serde2/lazybinary/LazyBinaryDate.java | 4 +++-
      .../hive/serde2/lazybinary/LazyBinarySerDe.java | 11 ++++++++---
      3 files changed, 11 insertions(+), 18 deletions(-)
    ----------------------------------------------------------------------
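
    [Editor's note: the hunks below move the date VInt handling out of DateWritable and into the lazybinary classes. The following is a minimal sketch of the post-patch round trip, not code from the commit itself; it assumes the hive-serde jar (and its Hadoop dependencies) is on the classpath, and the class name DateVIntRoundTrip and the literal day count are illustrative only.]

    import org.apache.hadoop.hive.serde2.ByteStream;
    import org.apache.hadoop.hive.serde2.io.DateWritable;
    import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
    import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.VInt;

    public class DateVIntRoundTrip {
      public static void main(String[] args) {
        // Hypothetical input: a date expressed as days since the epoch.
        DateWritable in = new DateWritable(16000);

        // Write side: after this patch the VInt encoding is done by the
        // lazybinary code (writeDateToByteStream in LazyBinarySerDe), not
        // by DateWritable itself.
        ByteStream.Output out = new ByteStream.Output();
        LazyBinaryUtils.writeVInt(out, in.getDays());

        // Read side: LazyBinaryDate.init now decodes the VInt directly and
        // sets the day count on the DateWritable.
        VInt vInt = new VInt();
        LazyBinaryUtils.readVInt(out.getData(), 0, vInt);
        DateWritable back = new DateWritable(vInt.value);

        System.out.println(in.getDays() + " -> " + back.getDays());
      }
    }

    Either way the wire format is unchanged; only the dependency direction moves, so DateWritable no longer imports anything from the lazybinary package.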


    http://git-wip-us.apache.org/repos/asf/hive/blob/7788968c/serde/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
    ----------------------------------------------------------------------
    diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java b/serde/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
    index 6cedf4c..e69351f 100644
    --- a/serde/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
    +++ b/serde/src/java/org/apache/hadoop/hive/serde2/io/DateWritable.java
    @@ -25,9 +25,6 @@ import java.util.Calendar;
      import java.util.TimeZone;
      import java.util.concurrent.TimeUnit;

    -import org.apache.hadoop.hive.serde2.ByteStream.RandomAccessOutput;
    -import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils;
    -import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.VInt;
      import org.apache.hadoop.io.WritableComparable;
      import org.apache.hadoop.io.WritableUtils;

    @@ -146,17 +143,6 @@ public class DateWritable implements WritableComparable<DateWritable> {
          return millisToDays(millisLocal);
        }

    - public void setFromBytes(byte[] bytes, int offset, int length, VInt vInt) {
    - LazyBinaryUtils.readVInt(bytes, offset, vInt);
    - assert (length == vInt.length);
    - set(vInt.value);
    - }
    -
    - public void writeToByteStream(RandomAccessOutput byteStream) {
    - LazyBinaryUtils.writeVInt(byteStream, getDays());
    - }
    -
    -
        @Override
        public void readFields(DataInput in) throws IOException {
          daysSinceEpoch = WritableUtils.readVInt(in);

    http://git-wip-us.apache.org/repos/asf/hive/blob/7788968c/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryDate.java
    ----------------------------------------------------------------------
    diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryDate.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryDate.java
    index d0c2504..4200e26 100644
    --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryDate.java
    +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryDate.java
    @@ -55,6 +55,8 @@ public class LazyBinaryDate extends
         */
        @Override
        public void init(ByteArrayRef bytes, int start, int length) {
    - data.setFromBytes(bytes.getData(), start, length, vInt);
    + LazyBinaryUtils.readVInt(bytes.getData(), start, vInt);
    + assert (length == vInt.length);
    + data.set(vInt.value);
        }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/7788968c/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    ----------------------------------------------------------------------
    diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    index 3d14fbe..a5dc5d8 100644
    --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinarySerDe.java
    @@ -250,9 +250,9 @@ public class LazyBinarySerDe extends AbstractSerDe {
         *
         * @param byteStream
         * the byte stream storing the serialization data
    - * @param obj
    + * @param fieldData
         * the struct object to serialize
    - * @param objInspector
    + * @param fieldOis
         * the struct object inspector
         * @param warnedOnceNullMapKey a boolean indicating whether a warning
         * has been issued once already when encountering null map keys
    @@ -311,6 +311,11 @@ public class LazyBinarySerDe extends AbstractSerDe {
          public boolean value;
        }

    + private static void writeDateToByteStream(RandomAccessOutput byteStream,
    + DateWritable date) {
    + LazyBinaryUtils.writeVInt(byteStream, date.getDays());
    + }
    +
        /**
         * A recursive function that serialize an object to a byte buffer based on its
         * object inspector.
    @@ -422,7 +427,7 @@ public class LazyBinarySerDe extends AbstractSerDe {

            case DATE: {
              DateWritable d = ((DateObjectInspector) poi).getPrimitiveWritableObject(obj);
    - d.writeToByteStream(byteStream);
    + writeDateToByteStream(byteStream, d);
              return;
            }
            case TIMESTAMP: {
  • Sunchao at Jul 20, 2015 at 8:12 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats7.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/beelinepositive/stats7.q.out b/ql/src/test/results/beelinepositive/stats7.q.out
    index 57870c5..e375cbf 100644
    --- a/ql/src/test/results/beelinepositive/stats7.q.out
    +++ b/ql/src/test/results/beelinepositive/stats7.q.out
    @@ -61,7 +61,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats7.db/analyze_srcpart/ds=2008-04-08/hr=11',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -100,7 +99,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats7.db/analyze_srcpart/ds=2008-04-08/hr=12',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -139,7 +137,6 @@ No rows selected
      'Owner: ','!!{user.name}!! ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Retention: ','0 ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats7.db/analyze_srcpart',''
      'Table Type: ','MANAGED_TABLE ',''

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats8.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/beelinepositive/stats8.q.out b/ql/src/test/results/beelinepositive/stats8.q.out
    index 017045c..1593bba 100644
    --- a/ql/src/test/results/beelinepositive/stats8.q.out
    +++ b/ql/src/test/results/beelinepositive/stats8.q.out
    @@ -59,7 +59,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=11',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -97,7 +96,6 @@ No rows selected
      'Owner: ','!!{user.name}!! ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Retention: ','0 ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart',''
      'Table Type: ','MANAGED_TABLE ',''
    @@ -165,7 +163,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=12',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -230,7 +227,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=11',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -295,7 +291,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=12',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -361,7 +356,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=11',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -400,7 +394,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-08/hr=12',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -439,7 +432,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=11',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -478,7 +470,6 @@ No rows selected
      'Table: ','analyze_srcpart ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart/ds=2008-04-09/hr=12',''
      'Partition Parameters:','',''
      '','numFiles ','1 '
    @@ -516,7 +507,6 @@ No rows selected
      'Owner: ','!!{user.name}!! ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Retention: ','0 ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats8.db/analyze_srcpart',''
      'Table Type: ','MANAGED_TABLE ',''

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats9.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/beelinepositive/stats9.q.out b/ql/src/test/results/beelinepositive/stats9.q.out
    index 8697d61..852d816 100644
    --- a/ql/src/test/results/beelinepositive/stats9.q.out
    +++ b/ql/src/test/results/beelinepositive/stats9.q.out
    @@ -48,7 +48,6 @@ No rows selected
      'Owner: ','!!{user.name}!! ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Retention: ','0 ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats9.db/analyze_srcbucket',''
      'Table Type: ','MANAGED_TABLE ',''

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/beelinepositive/stats_empty_partition.q.out b/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
    index ab6839e..415cf90 100644
    --- a/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
    +++ b/ql/src/test/results/beelinepositive/stats_empty_partition.q.out
    @@ -33,7 +33,6 @@ No rows selected
      'Table: ','tmptable ',''
      'CreateTime: ','!!TIMESTAMP!!',''
      'LastAccessTime: ','UNKNOWN ',''
    -'Protect Mode: ','None ',''
      'Location: ','!!{hive.metastore.warehouse.dir}!!/stats_empty_partition.db/tmptable/part=1',''
      'Partition Parameters:','',''
      '','numFiles ','1 '

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientnegative/alter_file_format.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/alter_file_format.q.out b/ql/src/test/results/clientnegative/alter_file_format.q.out
    index d0c470b..96f1bfb 100644
    --- a/ql/src/test/results/clientnegative/alter_file_format.q.out
    +++ b/ql/src/test/results/clientnegative/alter_file_format.q.out
    @@ -20,7 +20,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out b/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
    index 447dc3a..1cbfd75 100644
    --- a/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
    +++ b/ql/src/test/results/clientnegative/alter_view_as_select_with_partition.q.out
    @@ -48,7 +48,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out b/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
    index 321ebe5..d03c249 100644
    --- a/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
    +++ b/ql/src/test/results/clientnegative/stats_partialscan_autogether.q.out
    @@ -65,8 +65,6 @@ Partition Value: [2008-04-08, 11]
      Database: default
      Table: analyze_srcpart_partial_scan
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_file_format.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_file_format.q.out b/ql/src/test/results/clientpositive/alter_file_format.q.out
    index 4d6389a..c9e88f8 100644
    --- a/ql/src/test/results/clientpositive/alter_file_format.q.out
    +++ b/ql/src/test/results/clientpositive/alter_file_format.q.out
    @@ -20,7 +20,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -59,7 +58,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -104,7 +102,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -149,7 +146,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -194,7 +190,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -239,7 +234,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -284,7 +278,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -349,8 +342,6 @@ Partition Value: [2010]
      Database: default
      Table: alter_partition_format_test
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
      #### A masked pattern was here ####

    @@ -394,8 +385,6 @@ Partition Value: [2010]
      Database: default
      Table: alter_partition_format_test
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -445,8 +434,6 @@ Partition Value: [2010]
      Database: default
      Table: alter_partition_format_test
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -496,8 +483,6 @@ Partition Value: [2010]
      Database: default
      Table: alter_partition_format_test
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -547,8 +532,6 @@ Partition Value: [2010]
      Database: default
      Table: alter_partition_format_test
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -598,8 +581,6 @@ Partition Value: [2010]
      Database: default
      Table: alter_partition_format_test
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
    index 43cc4ef..cefe069 100644
    --- a/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
    +++ b/ql/src/test/results/clientpositive/alter_merge_stats_orc.q.out
    @@ -85,7 +85,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -137,7 +136,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -242,8 +240,6 @@ Partition Value: [2011]
      Database: default
      Table: src_orc_merge_test_part_stat
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 3
    @@ -293,8 +289,6 @@ Partition Value: [2011]
      Database: default
      Table: src_orc_merge_test_part_stat
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 3
    @@ -352,8 +346,6 @@ Partition Value: [2011]
      Database: default
      Table: src_orc_merge_test_part_stat
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
    index d3bc389..b1dfd7c 100644
    --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
    +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table.q.out
    @@ -34,7 +34,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -82,8 +81,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -145,8 +142,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -184,7 +179,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -231,7 +225,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -269,8 +262,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -316,7 +307,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -354,8 +344,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -406,7 +394,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -454,7 +441,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -502,7 +488,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -550,7 +535,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
    index 185cf1c..e5f8e7f 100644
    --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
    +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2.q.out
    @@ -32,7 +32,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -80,8 +79,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -129,7 +126,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -177,8 +173,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -226,7 +220,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -274,8 +267,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -323,7 +314,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -371,8 +361,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -420,7 +408,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -468,8 +455,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -517,7 +502,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -565,8 +549,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -614,7 +596,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -662,8 +643,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -711,7 +690,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -759,8 +737,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -808,7 +784,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -856,8 +831,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
    index 86c12c7..f919f10 100644
    --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
    +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table2_h23.q.out
    @@ -33,7 +33,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -81,8 +80,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -130,7 +127,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -178,8 +174,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 8
    @@ -227,7 +221,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -275,8 +268,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 8
    @@ -324,7 +315,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -372,8 +362,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 8
    @@ -421,7 +409,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -469,8 +456,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4
    @@ -518,7 +503,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -566,8 +550,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4
    @@ -615,7 +597,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -663,8 +644,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4
    @@ -712,7 +691,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -760,8 +738,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 4
    @@ -809,7 +785,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -857,8 +832,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
    index 42a9796..4d0f841 100644
    --- a/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
    +++ b/ql/src/test/results/clientpositive/alter_numbuckets_partitioned_table_h23.q.out
    @@ -35,7 +35,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -83,8 +82,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 8
    @@ -146,8 +143,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 12
    @@ -185,7 +180,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -232,7 +226,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -270,8 +263,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 12
    @@ -317,7 +308,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -355,8 +345,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
      #### A masked pattern was here ####
    @@ -407,7 +395,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -455,7 +442,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -503,7 +489,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -551,7 +536,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
    index eb08b6f..f5e8d1f 100644
    --- a/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
    +++ b/ql/src/test/results/clientpositive/alter_partition_clusterby_sortby.q.out
    @@ -47,8 +47,6 @@ Partition Value: [abc]
      Database: default
      Table: alter_table_partition_clusterby_sortby
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -102,8 +100,6 @@ Partition Value: [abc]
      Database: default
      Table: alter_table_partition_clusterby_sortby
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -157,8 +153,6 @@ Partition Value: [abc]
      Database: default
      Table: alter_table_partition_clusterby_sortby
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE false
      #### A masked pattern was here ####
    @@ -201,7 +195,6 @@ c string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_skewed_table.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_skewed_table.q.out b/ql/src/test/results/clientpositive/alter_skewed_table.q.out
    index 0e0c5b0..03904e6 100644
    --- a/ql/src/test/results/clientpositive/alter_skewed_table.q.out
    +++ b/ql/src/test/results/clientpositive/alter_skewed_table.q.out
    @@ -20,7 +20,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -59,7 +58,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -120,7 +118,6 @@ value string
      # Detailed Table Information
      Database: skew_test
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -159,7 +156,6 @@ value string
      # Detailed Table Information
      Database: skew_test
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -214,7 +210,6 @@ value string
      # Detailed Table Information
      Database: skew_test
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -255,7 +250,6 @@ value string
      # Detailed Table Information
      Database: skew_test
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
    index b03146b..40974e4 100644
    --- a/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
    +++ b/ql/src/test/results/clientpositive/alter_table_not_sorted.q.out
    @@ -20,7 +20,6 @@ b int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -60,7 +59,6 @@ b int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_table_serde2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_table_serde2.q.out b/ql/src/test/results/clientpositive/alter_table_serde2.q.out
    index dc1dae3..3b63e7d 100644
    --- a/ql/src/test/results/clientpositive/alter_table_serde2.q.out
    +++ b/ql/src/test/results/clientpositive/alter_table_serde2.q.out
    @@ -31,7 +31,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -79,8 +78,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1
    @@ -130,7 +127,6 @@ ds string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -179,8 +175,6 @@ Partition Value: [1]
      Database: default
      Table: tst1
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
       COLUMN_STATS_ACCURATE true
       numFiles 1

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_view_as_select.q.out b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    index ff2d860..c89c0dc 100644
    --- a/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    +++ b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    @@ -30,7 +30,6 @@ hr string
      # Detailed Table Information
      Database: tv
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -72,7 +71,6 @@ value string
      # Detailed Table Information
      Database: tv
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -123,7 +121,6 @@ value string
      # Detailed Table Information
      Database: tv
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/authorization_index.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/authorization_index.q.out b/ql/src/test/results/clientpositive/authorization_index.q.out
    index 540d11b..adc02ad 100644
    --- a/ql/src/test/results/clientpositive/authorization_index.q.out
    +++ b/ql/src/test/results/clientpositive/authorization_index.q.out
    @@ -28,7 +28,6 @@ _offsets array<bigint>
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: INDEX_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/bucket5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/bucket5.q.out b/ql/src/test/results/clientpositive/bucket5.q.out
    index 0c8418d..2e2984b 100644
    --- a/ql/src/test/results/clientpositive/bucket5.q.out
    +++ b/ql/src/test/results/clientpositive/bucket5.q.out
    @@ -514,7 +514,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out b/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
    index c5a253d..a5df511 100644
    --- a/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
    +++ b/ql/src/test/results/clientpositive/create_alter_list_bucketing_table1.q.out
    @@ -31,7 +31,6 @@ col3 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -76,7 +75,6 @@ col3 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -126,7 +124,6 @@ col3 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -183,7 +180,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -233,7 +229,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -282,7 +277,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -332,7 +326,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out
    index 27142c6..c93b134 100644
    --- a/ql/src/test/results/clientpositive/create_like.q.out
    +++ b/ql/src/test/results/clientpositive/create_like.q.out
    @@ -20,7 +20,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -59,7 +58,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -106,7 +104,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -338,7 +335,6 @@ last_name string last name of actor playing role
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -387,7 +383,6 @@ last_name string last name of actor playing role
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -435,7 +430,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -475,7 +469,6 @@ col2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -514,7 +507,6 @@ col2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -563,7 +555,6 @@ col2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_like2.q.out b/ql/src/test/results/clientpositive/create_like2.q.out
    index e5c6f9c..8b001a9 100644
    --- a/ql/src/test/results/clientpositive/create_like2.q.out
    +++ b/ql/src/test/results/clientpositive/create_like2.q.out
    @@ -42,7 +42,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
    index e151897..d7f9dd2 100644
    --- a/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
    +++ b/ql/src/test/results/clientpositive/create_like_tbl_props.q.out
    @@ -24,7 +24,6 @@ value string default
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -68,7 +67,6 @@ value string default
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -112,7 +110,6 @@ value string default
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -157,7 +154,6 @@ value string default
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -212,7 +208,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_like_view.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_like_view.q.out b/ql/src/test/results/clientpositive/create_like_view.q.out
    index 0978fcf..e2dc2c4 100644
    --- a/ql/src/test/results/clientpositive/create_like_view.q.out
    +++ b/ql/src/test/results/clientpositive/create_like_view.q.out
    @@ -48,7 +48,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -105,7 +104,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -160,7 +158,6 @@ b string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -281,7 +278,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_or_replace_view.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_or_replace_view.q.out b/ql/src/test/results/clientpositive/create_or_replace_view.q.out
    index aab78ba..dd5bf13 100644
    --- a/ql/src/test/results/clientpositive/create_or_replace_view.q.out
    +++ b/ql/src/test/results/clientpositive/create_or_replace_view.q.out
    @@ -30,7 +30,6 @@ hr string
      # Detailed Table Information
      Database: vt
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -121,7 +120,6 @@ hr string
      # Detailed Table Information
      Database: vt
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -215,7 +213,6 @@ hr string
      # Detailed Table Information
      Database: vt
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -286,7 +283,6 @@ hr string
      # Detailed Table Information
      Database: vt
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -378,7 +374,6 @@ hr string
      # Detailed Table Information
      Database: vt
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_skewed_table1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_skewed_table1.q.out b/ql/src/test/results/clientpositive/create_skewed_table1.q.out
    index e8d85a5..415bb77 100644
    --- a/ql/src/test/results/clientpositive/create_skewed_table1.q.out
    +++ b/ql/src/test/results/clientpositive/create_skewed_table1.q.out
    @@ -36,7 +36,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -69,7 +68,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -103,7 +101,6 @@ col3 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_view.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_view.q.out b/ql/src/test/results/clientpositive/create_view.q.out
    index 67e87b9..1038d01 100644
    --- a/ql/src/test/results/clientpositive/create_view.q.out
    +++ b/ql/src/test/results/clientpositive/create_view.q.out
    @@ -250,7 +250,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -300,7 +299,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -347,7 +345,6 @@ valoo string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -396,7 +393,6 @@ valoo string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -761,7 +757,6 @@ c string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -842,7 +837,6 @@ m int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -921,7 +915,6 @@ m int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -990,7 +983,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -1067,7 +1059,6 @@ boom int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -1140,7 +1131,6 @@ mycol int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -1226,7 +1216,6 @@ key int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -1348,7 +1337,6 @@ v2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -1465,7 +1453,6 @@ value_count bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -1551,7 +1538,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_view_partitioned.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_view_partitioned.q.out b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
    index ebf9a6b..caa2251 100644
    --- a/ql/src/test/results/clientpositive/create_view_partitioned.q.out
    +++ b/ql/src/test/results/clientpositive/create_view_partitioned.q.out
    @@ -71,7 +71,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -279,7 +278,6 @@ hr string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -393,7 +391,6 @@ v string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/create_view_translate.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_view_translate.q.out b/ql/src/test/results/clientpositive/create_view_translate.q.out
    index fd74058..886a01b 100644
    --- a/ql/src/test/results/clientpositive/create_view_translate.q.out
    +++ b/ql/src/test/results/clientpositive/create_view_translate.q.out
    @@ -29,7 +29,6 @@ key string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:
    @@ -75,7 +74,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      Table Type: VIRTUAL_VIEW
      Table Parameters:

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/ctas.q.out b/ql/src/test/results/clientpositive/ctas.q.out
    index 6f17a74..24cabc5 100644
    --- a/ql/src/test/results/clientpositive/ctas.q.out
    +++ b/ql/src/test/results/clientpositive/ctas.q.out
    @@ -146,7 +146,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -294,7 +293,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -442,7 +440,6 @@ conb string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -507,7 +504,6 @@ conb string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -656,7 +652,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas_colname.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/ctas_colname.q.out b/ql/src/test/results/clientpositive/ctas_colname.q.out
    index 3ecfbf2..232d505 100644
    --- a/ql/src/test/results/clientpositive/ctas_colname.q.out
    +++ b/ql/src/test/results/clientpositive/ctas_colname.q.out
    @@ -103,7 +103,6 @@ c2 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -261,7 +260,6 @@ rr int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -449,7 +447,6 @@ lead1 string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -620,7 +617,6 @@ _c1 double
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -769,7 +765,6 @@ _c1 bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -1205,7 +1200,6 @@ _c1 bigint
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -1343,7 +1337,6 @@ key string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/ctas_hadoop20.q.out b/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
    index b76028b..4af1e0a 100644
    --- a/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
    +++ b/ql/src/test/results/clientpositive/ctas_hadoop20.q.out
    @@ -139,7 +139,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -283,7 +282,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -428,7 +426,6 @@ conb string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -493,7 +490,6 @@ conb string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -638,7 +634,6 @@ value string
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
    index 64d9cb3..554ae48 100644
    --- a/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
    +++ b/ql/src/test/results/clientpositive/ctas_uses_database_location.q.out
    @@ -138,7 +138,6 @@ value string
      # Detailed Table Information
      Database: db1
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/database_location.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/database_location.q.out b/ql/src/test/results/clientpositive/database_location.q.out
    index 159a8e2..797177d 100644
    --- a/ql/src/test/results/clientpositive/database_location.q.out
    +++ b/ql/src/test/results/clientpositive/database_location.q.out
    @@ -39,7 +39,6 @@ value int
      # Detailed Table Information
      Database: db1
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -110,7 +109,6 @@ value int
      # Detailed Table Information
      Database: db2
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/decimal_serde.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/decimal_serde.q.out b/ql/src/test/results/clientpositive/decimal_serde.q.out
    index d651799..0783d9a 100644
    --- a/ql/src/test/results/clientpositive/decimal_serde.q.out
    +++ b/ql/src/test/results/clientpositive/decimal_serde.q.out
    @@ -110,7 +110,6 @@ value int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -162,7 +161,6 @@ value int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/default_file_format.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/default_file_format.q.out b/ql/src/test/results/clientpositive/default_file_format.q.out
    index 1ffba08..3d5c20f 100644
    --- a/ql/src/test/results/clientpositive/default_file_format.q.out
    +++ b/ql/src/test/results/clientpositive/default_file_format.q.out
    @@ -55,7 +55,6 @@ c int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -85,7 +84,6 @@ c int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -115,7 +113,6 @@ c int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE
    @@ -145,7 +142,6 @@ c int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: EXTERNAL_TABLE
    @@ -176,7 +172,6 @@ c int
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_comment_indent.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/describe_comment_indent.q.out b/ql/src/test/results/clientpositive/describe_comment_indent.q.out
    index 5ded495..3e0f45e 100644
    --- a/ql/src/test/results/clientpositive/describe_comment_indent.q.out
    +++ b/ql/src/test/results/clientpositive/describe_comment_indent.q.out
    @@ -56,7 +56,6 @@ col3 string col3
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out b/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
    index 12f385d..b202e65 100644
    --- a/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
    +++ b/ql/src/test/results/clientpositive/describe_comment_nonascii.q.out
    @@ -49,7 +49,6 @@ col3 string ��_col3
      # Detailed Table Information
      Database: default
      #### A masked pattern was here ####
    -Protect Mode: None
      Retention: 0
      #### A masked pattern was here ####
      Table Type: MANAGED_TABLE

    http://git-wip-us.apache.org/repos/asf/hive/blob/d6ec52ee/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
    index 18768d9..2c8b0b0 100644
    --- a/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
    +++ b/ql/src/test/results/clientpositive/describe_formatted_view_partitioned.q.out
    @@ -55,8 +55,6 @@ Partition Value: [val_86]
      Database: default
      Table: view_partitioned
      #### A masked pattern was here ####
    -Protect Mode: None
    -#### A masked pattern was here ####
      Partition Parameters:
      #### A masked pattern was here ####
      PREHOOK: query: DROP VIEW view_partitioned
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11158 Add tests for HPL/SQL (Dmitry Tolpeko via gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7338d8e1
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7338d8e1
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7338d8e1

    Branch: refs/heads/spark
    Commit: 7338d8e11983bfe7a63aadfd82b64adef765cb67
    Parents: 240097b
    Author: Alan Gates <gates@hortonworks.com>
    Authored: Wed Jul 15 17:09:12 2015 -0700
    Committer: Alan Gates <gates@hortonworks.com>
    Committed: Wed Jul 15 17:09:12 2015 -0700

    ----------------------------------------------------------------------
      hplsql/pom.xml | 9 +-
      .../main/java/org/apache/hive/hplsql/Exec.java | 11 +-
      .../org/apache/hive/hplsql/TestHplsqlLocal.java | 330 +++++++++++++++++++
      hplsql/src/test/queries/local/add.sql | 2 +
      hplsql/src/test/queries/local/assign.sql | 7 +
      hplsql/src/test/queries/local/bool_expr.sql | 47 +++
      hplsql/src/test/queries/local/break.sql | 10 +
      hplsql/src/test/queries/local/case.sql | 35 ++
      hplsql/src/test/queries/local/cast.sql | 4 +
      hplsql/src/test/queries/local/char.sql | 1 +
      hplsql/src/test/queries/local/coalesce.sql | 4 +
      hplsql/src/test/queries/local/concat.sql | 2 +
      .../src/test/queries/local/create_function.sql | 11 +
      .../src/test/queries/local/create_function2.sql | 11 +
      .../src/test/queries/local/create_procedure.sql | 9 +
      hplsql/src/test/queries/local/date.sql | 5 +
      hplsql/src/test/queries/local/dbms_output.sql | 6 +
      hplsql/src/test/queries/local/declare.sql | 16 +
      .../test/queries/local/declare_condition.sql | 8 +
      .../test/queries/local/declare_condition2.sql | 10 +
      hplsql/src/test/queries/local/decode.sql | 10 +
      hplsql/src/test/queries/local/equal.sql | 55 ++++
      hplsql/src/test/queries/local/exception.sql | 14 +
      hplsql/src/test/queries/local/exception2.sql | 10 +
      hplsql/src/test/queries/local/exception3.sql | 5 +
      hplsql/src/test/queries/local/exception4.sql | 7 +
      hplsql/src/test/queries/local/exception5.sql | 10 +
      hplsql/src/test/queries/local/exit.sql | 31 ++
      hplsql/src/test/queries/local/expr.sql | 21 ++
      hplsql/src/test/queries/local/for_range.sql | 20 ++
      hplsql/src/test/queries/local/if.sql | 68 ++++
      hplsql/src/test/queries/local/instr.sql | 49 +++
      hplsql/src/test/queries/local/interval.sql | 15 +
      hplsql/src/test/queries/local/lang.sql | 57 ++++
      hplsql/src/test/queries/local/leave.sql | 33 ++
      hplsql/src/test/queries/local/len.sql | 1 +
      hplsql/src/test/queries/local/length.sql | 1 +
      hplsql/src/test/queries/local/lower.sql | 1 +
      hplsql/src/test/queries/local/nvl.sql | 4 +
      hplsql/src/test/queries/local/nvl2.sql | 2 +
      hplsql/src/test/queries/local/print.sql | 5 +
      hplsql/src/test/queries/local/return.sql | 3 +
      hplsql/src/test/queries/local/seterror.sql | 10 +
      hplsql/src/test/queries/local/sub.sql | 1 +
      hplsql/src/test/queries/local/substr.sql | 2 +
      hplsql/src/test/queries/local/substring.sql | 8 +
      hplsql/src/test/queries/local/timestamp.sql | 4 +
      hplsql/src/test/queries/local/timestamp_iso.sql | 2 +
      hplsql/src/test/queries/local/to_char.sql | 1 +
      hplsql/src/test/queries/local/to_timestamp.sql | 5 +
      hplsql/src/test/queries/local/trim.sql | 1 +
      hplsql/src/test/queries/local/twopipes.sql | 1 +
      hplsql/src/test/queries/local/upper.sql | 1 +
      hplsql/src/test/queries/local/values_into.sql | 6 +
      hplsql/src/test/queries/local/while.sql | 20 ++
      hplsql/src/test/results/local/add.out.txt | 2 +
      hplsql/src/test/results/local/assign.out.txt | 8 +
      hplsql/src/test/results/local/bool_expr.out.txt | 32 ++
      hplsql/src/test/results/local/break.out.txt | 29 ++
      hplsql/src/test/results/local/case.out.txt | 12 +
      hplsql/src/test/results/local/cast.out.txt | 8 +
      hplsql/src/test/results/local/char.out.txt | 1 +
      hplsql/src/test/results/local/coalesce.out.txt | 4 +
      hplsql/src/test/results/local/concat.out.txt | 2 +
      .../test/results/local/create_function.out.txt | 9 +
      .../test/results/local/create_function2.out.txt | 10 +
      .../test/results/local/create_procedure.out.txt | 8 +
      hplsql/src/test/results/local/date.out.txt | 4 +
      .../src/test/results/local/dbms_output.out.txt | 3 +
      hplsql/src/test/results/local/declare.out.txt | 13 +
      .../results/local/declare_condition.out.txt | 7 +
      .../results/local/declare_condition2.out.txt | 12 +
      hplsql/src/test/results/local/decode.out.txt | 13 +
      hplsql/src/test/results/local/equal.out.txt | 48 +++
      hplsql/src/test/results/local/exception.out.txt | 13 +
      .../src/test/results/local/exception2.out.txt | 5 +
      hplsql/src/test/results/local/exit.out.txt | 42 +++
      hplsql/src/test/results/local/expr.out.txt | 29 ++
      hplsql/src/test/results/local/for_range.out.txt | 65 ++++
      hplsql/src/test/results/local/if.out.txt | 40 +++
      hplsql/src/test/results/local/instr.out.txt | 33 ++
      hplsql/src/test/results/local/interval.out.txt | 11 +
      hplsql/src/test/results/local/lang.out.txt | 34 ++
      hplsql/src/test/results/local/leave.out.txt | 42 +++
      hplsql/src/test/results/local/len.out.txt | 1 +
      hplsql/src/test/results/local/length.out.txt | 1 +
      hplsql/src/test/results/local/lower.out.txt | 1 +
      hplsql/src/test/results/local/nvl.out.txt | 4 +
      hplsql/src/test/results/local/nvl2.out.txt | 2 +
      .../test/results/local/plhqlexception.out.txt | 6 +
      .../test/results/local/plhqlexception1.out.txt | 10 +
      .../test/results/local/plhqlexception2.out.txt | 106 ++++++
      hplsql/src/test/results/local/print.out.txt | 6 +
      hplsql/src/test/results/local/return.out.txt | 3 +
      .../results/local/select_conversion.out.txt | 9 +
      hplsql/src/test/results/local/seterror.out.txt | 6 +
      hplsql/src/test/results/local/sub.out.txt | 1 +
      hplsql/src/test/results/local/substr.out.txt | 2 +
      hplsql/src/test/results/local/substring.out.txt | 8 +
      hplsql/src/test/results/local/timestamp.out.txt | 4 +
      .../test/results/local/timestamp_iso.out.txt | 2 +
      hplsql/src/test/results/local/to_char.out.txt | 1 +
      .../src/test/results/local/to_timestamp.out.txt | 4 +
      hplsql/src/test/results/local/trim.out.txt | 1 +
      hplsql/src/test/results/local/twopipes.out.txt | 1 +
      hplsql/src/test/results/local/upper.out.txt | 1 +
      .../src/test/results/local/values_into.out.txt | 11 +
      hplsql/src/test/results/local/while.out.txt | 72 ++++
      108 files changed, 1821 insertions(+), 3 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/pom.xml
    ----------------------------------------------------------------------
    diff --git a/hplsql/pom.xml b/hplsql/pom.xml
    index d096e90..fc1c527 100644
    --- a/hplsql/pom.xml
    +++ b/hplsql/pom.xml
    @@ -58,7 +58,6 @@
            <artifactId>commons-logging</artifactId>
            <version>${commons-logging.version}</version>
          </dependency>
    -
          <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
    @@ -74,6 +73,12 @@
             <artifactId>antlr4-runtime</artifactId>
             <version>4.5</version>
          </dependency>
    + <dependency>
    + <groupId>junit</groupId>
    + <artifactId>junit</artifactId>
    + <version>${junit.version}</version>
    + <scope>test</scope>
    + </dependency>
        </dependencies>

        <profiles>
    @@ -103,7 +108,7 @@

        <build>
          <plugins>
    - <plugin>
    + <plugin>
              <groupId>org.antlr</groupId>
                <artifactId>antlr4-maven-plugin</artifactId>
                <version>4.5</version>

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    index 9ec8959..40fdc82 100644
    --- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java
    @@ -1644,7 +1644,16 @@ public class Exec extends HplsqlBaseVisitor<Integer> {
         */
        @Override
        public Integer visitLabel(HplsqlParser.LabelContext ctx) {
    - exec.labels.push(ctx.L_ID().toString());
    + if (ctx.L_ID() != null) {
    + exec.labels.push(ctx.L_ID().toString());
    + }
    + else {
    + String label = ctx.L_LABEL().getText();
    + if (label.endsWith(":")) {
    + label = label.substring(0, label.length() - 1);
    + }
    + exec.labels.push(label);
    + }
          return 0;
        }
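
    The hunk above makes visitLabel handle both label syntaxes: a plain identifier (L_ID) is pushed onto exec.labels unchanged, while a "lbl:"-style token (L_LABEL) first has its trailing colon stripped so that a later LEAVE lbl can match it. A minimal standalone sketch of that normalization step (the class and method names below are illustrative only, not part of the patch):

        // Illustrative sketch of the colon-stripping logic shown in the hunk above.
        // LabelNormalizer is a made-up name; only the string handling mirrors the patch.
        public class LabelNormalizer {

          static String normalize(String token) {
            // "lbl:" (an L_LABEL token) becomes "lbl"; a plain identifier is returned as-is.
            if (token.endsWith(":")) {
              return token.substring(0, token.length() - 1);
            }
            return token;
          }

          public static void main(String[] args) {
            System.out.println(normalize("lbl:")); // prints lbl
            System.out.println(normalize("lbl"));  // prints lbl
          }
        }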


    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    new file mode 100644
    index 0000000..ee2be66
    --- /dev/null
    +++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java
    @@ -0,0 +1,330 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + *
    + * http://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package org.apache.hive.hplsql;
    +
    +import java.io.BufferedReader;
    +import java.io.ByteArrayOutputStream;
    +import java.io.PrintStream;
    +import java.io.StringReader;
    +import org.apache.commons.io.FileUtils;
    +import org.junit.Assert;
    +import org.junit.Test;
    +
    +/**
    + * Unit tests for HPL/SQL (no Hive connection required)
    + */
    +public class TestHplsqlLocal {
    +
    + private final ByteArrayOutputStream out = new ByteArrayOutputStream();
    +
    + @Test
    + public void testAdd() throws Exception {
    + run("add");
    + }
    +
    + @Test
    + public void testAssign() throws Exception {
    + run("assign");
    + }
    +
    + @Test
    + public void testBoolExpr() throws Exception {
    + run("bool_expr");
    + }
    +
    + @Test
    + public void testBreak() throws Exception {
    + run("break");
    + }
    +
    + @Test
    + public void testCase() throws Exception {
    + run("case");
    + }
    +
    + @Test
    + public void testCast() throws Exception {
    + run("cast");
    + }
    +
    + @Test
    + public void testChar() throws Exception {
    + run("char");
    + }
    +
    + @Test
    + public void testCoalesce() throws Exception {
    + run("coalesce");
    + }
    +
    + @Test
    + public void testConcat() throws Exception {
    + run("concat");
    + }
    +
    + @Test
    + public void testCreateFunction() throws Exception {
    + run("create_function");
    + }
    +
    + @Test
    + public void testCreateFunction2() throws Exception {
    + run("create_function2");
    + }
    +
    + @Test
    + public void testCreateProcedure() throws Exception {
    + run("create_procedure");
    + }
    +
    + @Test
    + public void testDate() throws Exception {
    + run("date");
    + }
    +
    + @Test
    + public void testDbmsOutput() throws Exception {
    + run("dbms_output");
    + }
    +
    + @Test
    + public void testDeclare() throws Exception {
    + run("declare");
    + }
    +
    + @Test
    + public void testDeclareCondition() throws Exception {
    + run("declare_condition");
    + }
    +
    + @Test
    + public void testDeclareCondition2() throws Exception {
    + run("declare_condition2");
    + }
    +
    + @Test
    + public void testDecode() throws Exception {
    + run("decode");
    + }
    +
    + @Test
    + public void testEqual() throws Exception {
    + run("equal");
    + }
    +
    + @Test
    + public void testException() throws Exception {
    + run("exception");
    + }
    +
    + @Test
    + public void testException2() throws Exception {
    + run("exception2");
    + }
    +
    + @Test
    + public void testException3() throws Exception {
    + run("exception2");
    + }
    +
    + @Test
    + public void testException4() throws Exception {
    + run("exception2");
    + }
    +
    + @Test
    + public void testException5() throws Exception {
    + run("exception2");
    + }
    +
    + @Test
    + public void testExit() throws Exception {
    + run("exit");
    + }
    +
    + @Test
    + public void testExpr() throws Exception {
    + run("expr");
    + }
    +
    + @Test
    + public void testForRange() throws Exception {
    + run("for_range");
    + }
    +
    + @Test
    + public void testIf() throws Exception {
    + run("if");
    + }
    +
    + @Test
    + public void testInstr() throws Exception {
    + run("instr");
    + }
    +
    + @Test
    + public void testInterval() throws Exception {
    + run("interval");
    + }
    +
    + @Test
    + public void testLang() throws Exception {
    + run("lang");
    + }
    +
    + @Test
    + public void testLeave() throws Exception {
    + run("leave");
    + }
    +
    + @Test
    + public void testLength() throws Exception {
    + run("length");
    + }
    +
    + @Test
    + public void testLen() throws Exception {
    + run("len");
    + }
    +
    + @Test
    + public void testLower() throws Exception {
    + run("lower");
    + }
    +
    + @Test
    + public void testNvl() throws Exception {
    + run("nvl");
    + }
    +
    + @Test
    + public void testNvl2() throws Exception {
    + run("nvl2");
    + }
    +
    + @Test
    + public void testPrint() throws Exception {
    + run("print");
    + }
    +
    + @Test
    + public void testReturn() throws Exception {
    + run("return");
    + }
    +
    + @Test
    + public void testSetError() throws Exception {
    + run("seterror");
    + }
    +
    + @Test
    + public void testSub() throws Exception {
    + run("sub");
    + }
    +
    + @Test
    + public void testSubstring() throws Exception {
    + run("substring");
    + }
    +
    + @Test
    + public void testSubstr() throws Exception {
    + run("substr");
    + }
    +
    + @Test
    + public void testTimestampIso() throws Exception {
    + run("timestamp_iso");
    + }
    +
    + @Test
    + public void testTimestamp() throws Exception {
    + run("timestamp");
    + }
    +
    + @Test
    + public void testToChar() throws Exception {
    + run("to_char");
    + }
    +
    + @Test
    + public void testToTimestamp() throws Exception {
    + run("to_timestamp");
    + }
    +
    + @Test
    + public void testTrim() throws Exception {
    + run("trim");
    + }
    +
    + @Test
    + public void testTwoPipes() throws Exception {
    + run("twopipes");
    + }
    +
    + @Test
    + public void testUpper() throws Exception {
    + run("upper");
    + }
    +
    + @Test
    + public void testValuesInto() throws Exception {
    + run("values_into");
    + }
    +
    + @Test
    + public void testWhile() throws Exception {
    + run("while");
    + }
    +
    + /**
    + * Run a test file
    + */
    + void run(String testFile) throws Exception {
    + System.setOut(new PrintStream(out));
    + Exec exec = new Exec();
    + String[] args = { "-f", "src/test/queries/local/" + testFile + ".sql", "-trace" };
    + exec.init(args);
    + Var result = exec.run();
    + if (result != null) {
    + System.out.println(result.toString());
    + }
    + String s = getTestOutput(out.toString()).trim();
    + FileUtils.writeStringToFile(new java.io.File("target/tmp/log/" + testFile + ".out.txt"), s);
    + String t = FileUtils.readFileToString(new java.io.File("src/test/results/local/" + testFile + ".out.txt"), "utf-8").trim();
    + System.setOut(null);
    + Assert.assertEquals(s, t);
    + }
    +
    + /**
    + * Get test output
    + */
    + String getTestOutput(String s) throws Exception {
    + StringBuilder sb = new StringBuilder();
    + BufferedReader reader = new BufferedReader(new StringReader(s));
    + String line = null;
    + while ((line = reader.readLine()) != null) {
    + if (!line.startsWith("log4j:")) {
    + sb.append(line);
    + sb.append("\n");
    + }
    + }
    + return sb.toString();
    + }
    +}
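
    The run() helper above also serves as a template for driving the interpreter outside JUnit. A minimal sketch under the same assumptions as the test (same package, so the Exec calls from the diff resolve; RunOneScript and the chosen script path are illustrative only):

        // Illustrative driver modeled on TestHplsqlLocal.run(); only Exec, init(String[])
        // and run() are taken from the diff above, everything else is a placeholder.
        package org.apache.hive.hplsql;

        public class RunOneScript {
          public static void main(String[] args) throws Exception {
            Exec exec = new Exec();
            // Same arguments the test helper builds: -f <script> plus -trace for verbose output.
            exec.init(new String[] { "-f", "src/test/queries/local/if.sql", "-trace" });
            Object result = exec.run(); // run() returns a Var per the diff; print it when present
            if (result != null) {
              System.out.println(result);
            }
          }
        }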

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/add.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/add.sql b/hplsql/src/test/queries/local/add.sql
    new file mode 100644
    index 0000000..2861a5f
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/add.sql
    @@ -0,0 +1,2 @@
    +DATE '2014-12-31' + 1;
    +1 + DATE '2014-12-31';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/assign.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/assign.sql b/hplsql/src/test/queries/local/assign.sql
    new file mode 100644
    index 0000000..67e6893
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/assign.sql
    @@ -0,0 +1,7 @@
    +code := 'A';
    +status := 1;
    +count = 0;
    +
    +SET code = 'A';
    +SET status = 1, count = 0;
    +SET (count, limit) = (0, 100);
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/bool_expr.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/bool_expr.sql b/hplsql/src/test/queries/local/bool_expr.sql
    new file mode 100644
    index 0000000..098096f
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/bool_expr.sql
    @@ -0,0 +1,47 @@
    +IF 1=1 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'FAILED';
    +END IF;
    +
    +IF 1=1 OR 2=2 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'FAILED';
    +END IF;
    +
    +IF (1=1 OR 2=2) THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'FAILED';
    +END IF;
    +
    +IF (1=1 AND 2=2 AND 3=4) THEN
    + PRINT 'FAILED';
    +ELSE
    + PRINT 'Correct';
    +END IF;
    +
    +IF ((1=1) AND (2=2)) THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'FAILED';
    +END IF;
    +
    +IF (1=1 AND (2=2)) THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'FAILED';
    +END IF;
    +
    +IF ((1=1) AND 2=2 AND 3=3) THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'FAILED';
    +END IF;
    +
    +IF ((1=1 OR 2=2) AND 2=2 AND 3=3 AND (1=2 OR 2=3)) THEN
    + PRINT 'FAILED';
    +ELSE
    + PRINT 'Correct';
    +END IF;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/break.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/break.sql b/hplsql/src/test/queries/local/break.sql
    new file mode 100644
    index 0000000..c53535d
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/break.sql
    @@ -0,0 +1,10 @@
    +DECLARE count INT DEFAULT 3;
    +WHILE 1=1 BEGIN
    + PRINT 'Start of while block';
    + PRINT count;
    + SET count = count - 1;
    + IF count = 0
    + BREAK;
    + PRINT 'End of while block';
    +END
    +PRINT 'End of script';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/case.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/case.sql b/hplsql/src/test/queries/local/case.sql
    new file mode 100644
    index 0000000..5bbdda9
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/case.sql
    @@ -0,0 +1,35 @@
    +PRINT CASE 1
    + WHEN 0 THEN 'FAILED'
    + WHEN 1 THEN 'Correct'
    + WHEN 2 THEN 'FAILED'
    + ELSE 'FAILED'
    + END
    +
    +PRINT CASE 3
    + WHEN 0 THEN 'FAILED'
    + WHEN 1 THEN 'FAILED'
    + ELSE 'Correct'
    + END
    +
    +PRINT NVL2(CASE 3
    + WHEN 0 THEN 'FAILED'
    + WHEN 1 THEN 'FAILED'
    + END, 'FAILED', 'Correct')
    +
    +PRINT CASE
    + WHEN 1=0 THEN 'FAILED'
    + WHEN 1=1 THEN 'Correct'
    + WHEN 1=2 THEN 'FAILED'
    + ELSE 'FAILED'
    + END
    +
    +PRINT CASE
    + WHEN 3=0 THEN 'FAILED'
    + WHEN 3=1 THEN 'FAILED'
    + ELSE 'Correct'
    + END
    +
    +PRINT NVL2(CASE
    + WHEN 3=0 THEN 'FAILED'
    + WHEN 3=1 THEN 'FAILED'
    + END, 'FAILED', 'Correct')
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/cast.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/cast.sql b/hplsql/src/test/queries/local/cast.sql
    new file mode 100644
    index 0000000..3adab22
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/cast.sql
    @@ -0,0 +1,4 @@
    +CAST('Abc' AS CHAR(1));
    +CAST('Abc' AS VARCHAR(2));
    +CAST('Abc' AS CHAR);
    +CAST(TIMESTAMP '2015-03-12 10:58:34.111' AS CHAR(10))

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/char.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/char.sql b/hplsql/src/test/queries/local/char.sql
    new file mode 100644
    index 0000000..2a4f779
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/char.sql
    @@ -0,0 +1 @@
    +CHAR(1000)
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/coalesce.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/coalesce.sql b/hplsql/src/test/queries/local/coalesce.sql
    new file mode 100644
    index 0000000..4b65d58
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/coalesce.sql
    @@ -0,0 +1,4 @@
    +COALESCE('First non-null', 1);
    +COALESCE(NULL, 'First non-null');
    +COALESCE(NULL, 'First non-null', 1);
    +COALESCE(NULL, NULL, 'First non-null', 1);
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/concat.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/concat.sql b/hplsql/src/test/queries/local/concat.sql
    new file mode 100644
    index 0000000..b7769bb
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/concat.sql
    @@ -0,0 +1,2 @@
    +CONCAT('a', 'b', NULL, 'c');
    +NVL(CONCAT(NULL, NULL, NULL), 'NULL Value');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/create_function.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/create_function.sql b/hplsql/src/test/queries/local/create_function.sql
    new file mode 100644
    index 0000000..96bf290
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/create_function.sql
    @@ -0,0 +1,11 @@
    +CREATE FUNCTION hello()
    + RETURNS STRING
    +BEGIN
    + PRINT 'Start';
    + RETURN 'Hello, world';
    + PRINT 'Must not be printed';
    +END;
    +
    +-- Call the function
    +PRINT hello() || '!';
    +PRINT 'End of script';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/create_function2.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/create_function2.sql b/hplsql/src/test/queries/local/create_function2.sql
    new file mode 100644
    index 0000000..744ea9e
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/create_function2.sql
    @@ -0,0 +1,11 @@
    +CREATE FUNCTION hello2(text STRING)
    + RETURNS STRING
    +BEGIN
    + PRINT 'Start';
    + RETURN 'Hello, ' || text || '!';
    + PRINT 'Must not be printed';
    +END;
    +
    +-- Call the function
    +PRINT hello2('wor' || 'ld');
    +PRINT 'End of script';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/create_procedure.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/create_procedure.sql b/hplsql/src/test/queries/local/create_procedure.sql
    new file mode 100644
    index 0000000..28088a2
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/create_procedure.sql
    @@ -0,0 +1,9 @@
    +CREATE PROCEDURE set_message(IN name STRING, OUT result STRING)
    +BEGIN
    + SET result = 'Hello, ' || name || '!';
    +END;
    +
    +DECLARE str STRING;
    +CALL set_message('world', str);
    +PRINT str;
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/date.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/date.sql b/hplsql/src/test/queries/local/date.sql
    new file mode 100644
    index 0000000..2ef4743
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/date.sql
    @@ -0,0 +1,5 @@
    +DATE '2014-12-20'
    +
    +DATE('2015-03-12');
    +DATE('2015' || '-03-' || '12');
    +DATE(TIMESTAMP '2015-03-12 10:58:34.111');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/dbms_output.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/dbms_output.sql b/hplsql/src/test/queries/local/dbms_output.sql
    new file mode 100644
    index 0000000..37d7313
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/dbms_output.sql
    @@ -0,0 +1,6 @@
    +DECLARE
    + str VARCHAR(200) DEFAULT 'Hello, world!';
    +BEGIN
    + DBMS_OUTPUT.PUT_LINE('Hello, world!');
    + DBMS_OUTPUT.PUT_LINE(str);
    +END;

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/declare.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/declare.sql b/hplsql/src/test/queries/local/declare.sql
    new file mode 100644
    index 0000000..fd02da9
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/declare.sql
    @@ -0,0 +1,16 @@
    +DECLARE
    + code CHAR(10);
    + status INT := 1;
    + count SMALLINT = 0;
    + limit INT DEFAULT 100;
    + f UTL_FILE.FILE_TYPE;
    +BEGIN
    + status := 2;
    +END;
    +
    +DECLARE code CHAR(10);
    +DECLARE status, status2 INT DEFAULT 1;
    +DECLARE count SMALLINT, limit INT DEFAULT 100;
    +
    +DECLARE dt DATE DEFAULT '2015-05-13';
    +DECLARE ts TIMESTAMP DEFAULT '2015-05-13 11:10:01';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/declare_condition.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/declare_condition.sql b/hplsql/src/test/queries/local/declare_condition.sql
    new file mode 100644
    index 0000000..8739499
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/declare_condition.sql
    @@ -0,0 +1,8 @@
    +DECLARE cnt_condition CONDITION;
    +DECLARE EXIT HANDLER FOR cnt_condition
    + PRINT 'Condition raised';
    +IF 1 <> 2 THEN
    + SIGNAL cnt_condition;
    +END IF;
    +PRINT 'Must not be printed 1';
    +PRINT 'Must not be printed 2';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/declare_condition2.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/declare_condition2.sql b/hplsql/src/test/queries/local/declare_condition2.sql
    new file mode 100644
    index 0000000..d6a6461
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/declare_condition2.sql
    @@ -0,0 +1,10 @@
    +DECLARE cnt_condition CONDITION;
    +DECLARE CONTINUE HANDLER FOR cnt_condition
    + PRINT 'Wrong condition';
    +DECLARE CONTINUE HANDLER FOR cnt_condition2
    + PRINT 'Condition raised';
    +IF 1 <> 2 THEN
    + SIGNAL cnt_condition2;
    +END IF;
    +PRINT 'Executed 1';
    +PRINT 'Executed 2';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/decode.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/decode.sql b/hplsql/src/test/queries/local/decode.sql
    new file mode 100644
    index 0000000..a9f7c0c
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/decode.sql
    @@ -0,0 +1,10 @@
    +DECLARE var1 INT DEFAULT 3;
    +PRINT DECODE (var1, 1, 'A', 2, 'B', 3, 'C');
    +PRINT DECODE (var1, 1, 'A', 2, 'B', 'C');
    +
    +SET var1 := 1;
    +PRINT DECODE (var1, 1, 'A', 2, 'B', 3, 'C');
    +
    +SET var1 := NULL;
    +PRINT DECODE (var1, 1, 'A', 2, 'B', NULL, 'C');
    +PRINT DECODE (var1, 1, 'A', 2, 'B', 'C');

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/equal.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/equal.sql b/hplsql/src/test/queries/local/equal.sql
    new file mode 100644
    index 0000000..0de2801
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/equal.sql
    @@ -0,0 +1,55 @@
    +PRINT 'Case 1 = 1';
    +IF 1 = 1 THEN
    + PRINT 'Equal - Correct';
    +ELSE
    + PRINT 'Not equal - Incorrect';
    +END IF;
    +
    +PRINT 'Case 1 == 1';
    +IF 1 == 1 THEN
    + PRINT 'Equal - Correct';
    +ELSE
    + PRINT 'Not equal - Incorrect';
    +END IF;
    +
    +PRINT 'Case 1 <> 3';
    +IF 1 <> 3 THEN
    + PRINT 'Not equal - Correct';
    +ELSE
    + PRINT 'Equal - Incorrect';
    +END IF;
    +
    +PRINT 'Case 1 != 3';
    +IF 1 != 3 THEN
    + PRINT 'Not equal - Correct';
    +ELSE
    + PRINT 'Equal - Incorrect';
    +END IF;
    +
    +PRINT 'Case 3 > 1';
    +IF 3 > 1 THEN
    + PRINT 'Greater - Correct';
    +ELSE
    + PRINT 'Greater - Incorrect';
    +END IF;
    +
    +PRINT 'Case 1 < 3';
    +IF 1 < 3 THEN
    + PRINT 'Less - Correct';
    +ELSE
    + PRINT 'Less - Incorrect';
    +END IF;
    +
    +PRINT 'Case 3 >= 1';
    +IF 3 >= 1 THEN
    + PRINT 'Greater or equal - Correct';
    +ELSE
    + PRINT 'Greater or equal - Incorrect';
    +END IF;
    +
    +PRINT 'Case 1 <= 3';
    +IF 1 <= 3 THEN
    + PRINT 'Less or equal - Correct';
    +ELSE
    + PRINT 'Less or equal - Incorrect';
    +END IF;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/exception.sql b/hplsql/src/test/queries/local/exception.sql
    new file mode 100644
    index 0000000..7ce7377
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/exception.sql
    @@ -0,0 +1,14 @@
    +BEGIN
    + PRINT 'Correct';
    + WHILE 1=1 THEN
    + FETCH cur INTO v;
    + PRINT 'Incorrect - unreachable code, unknown cursor name, exception must be raised';
    + END WHILE;
    +EXCEPTION WHEN OTHERS THEN
    + PRINT 'Correct';
    + PRINT 'Correct';
    + PRINT 'Correct - Exception raised';
    + WHEN NO_DATA_FOUND THEN
    + PRINT 'Correct';
    +END
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception2.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/exception2.sql b/hplsql/src/test/queries/local/exception2.sql
    new file mode 100644
    index 0000000..3394da8
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/exception2.sql
    @@ -0,0 +1,10 @@
    +DECLARE
    + v VARCHAR(200);
    +BEGIN
    + OPEN cur FOR 'SELECT c1 FROM t1';
    + FETCH cur INTO v;
    + CLOSE cur;
    +EXCEPTION WHEN OTHERS THEN
    + DBMS_OUTPUT.PUT_LINE('Error');
    +END
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception3.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/exception3.sql b/hplsql/src/test/queries/local/exception3.sql
    new file mode 100644
    index 0000000..a12b853
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/exception3.sql
    @@ -0,0 +1,5 @@
    +PRINT 'Correct';
    +WHILE 1=1 THEN
    +FETCH cur INTO v;
    +PRINT 'Incorrect - unreachable code, unknown cursor name, exception must be raised';
    +END WHILE;

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception4.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/exception4.sql b/hplsql/src/test/queries/local/exception4.sql
    new file mode 100644
    index 0000000..38d89b5
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/exception4.sql
    @@ -0,0 +1,7 @@
    +PRINT 'Correct';
    +DECLARE EXIT HANDLER FOR SQLEXCEPTION
    + PRINT 'Correct - Exception raised';
    +WHILE 1=1 THEN
    +FETCH cur INTO v;
    +PRINT 'Incorrect - unreachable code, unknown cursor name, exception must be raised';
    +END WHILE;

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exception5.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/exception5.sql b/hplsql/src/test/queries/local/exception5.sql
    new file mode 100644
    index 0000000..6232984
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/exception5.sql
    @@ -0,0 +1,10 @@
    +DECLARE cnt INT := 0;
    +PRINT 'Correct';
    +DECLARE CONTINUE HANDLER FOR SQLEXCEPTION
    + PRINT 'Correct - Exception raised';
    +WHILE cnt < 10 THEN
    +FETCH cur INTO v;
    +PRINT cnt;
    +PRINT 'Correct - exception handled';
    +SET cnt = cnt + 1;
    +END WHILE;

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/exit.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/exit.sql b/hplsql/src/test/queries/local/exit.sql
    new file mode 100644
    index 0000000..d0e432b
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/exit.sql
    @@ -0,0 +1,31 @@
    +DECLARE count INT DEFAULT 3;
    +
    +WHILE 1=1 LOOP
    + PRINT 'Start of while block';
    + PRINT count;
    + count := count - 1;
    + EXIT WHEN count = 0;
    + PRINT 'End of while block';
    +END LOOP;
    +
    +count := 3;
    +
    +<<lbl>>
    +WHILE 1=1 LOOP
    + PRINT 'Start of outer while block';
    +
    + WHILE 1=1 LOOP
    + PRINT 'Start of 1st inner while block';
    + EXIT;
    + PRINT 'End of 1st inner while block (NEVER SHOWN)';
    + END LOOP;
    +
    + <<lbl2>>
    + WHILE 1=1 LOOP
    + PRINT 'Start of 2nd inner while block';
    + EXIT lbl;
    + PRINT 'End of 2nd inner while block (NEVER SHOWN)';
    + END LOOP;
    + PRINT 'End of outer while block (NEVER SHOWN)';
    +END LOOP;
    +PRINT 'End of script';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/expr.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/expr.sql b/hplsql/src/test/queries/local/expr.sql
    new file mode 100644
    index 0000000..33388a2
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/expr.sql
    @@ -0,0 +1,21 @@
    +PRINT 'a' || 'b';
    +PRINT 'a' || 1 || 'b';
    +PRINT 1 || 'a' || 'b';
    +PRINT 'a' || null || 'b';
    +PRINT null || 'a' || 'b';
    +PRINT null || null;
    +
    +DECLARE c INT;
    +
    +PRINT 'Integer increment';
    +c := 3;
    +c := c + 1;
    +PRINT c;
    +
    +PRINT 'Integer decrement';
    +c := 3;
    +c := c - 1;
    +PRINT c;
    +
    +PRINT NVL(null - 3, 'Correct');
    +PRINT NVL(null + 3, 'Correct');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/for_range.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/for_range.sql b/hplsql/src/test/queries/local/for_range.sql
    new file mode 100644
    index 0000000..b7af115
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/for_range.sql
    @@ -0,0 +1,20 @@
    +DECLARE i INT = 3;
    +PRINT i;
    +
    +FOR i IN 1..10 LOOP
    + PRINT i;
    +END LOOP;
    +
    +PRINT i;
    +
    +FOR i IN REVERSE 10..1 LOOP
    + PRINT i;
    +END LOOP;
    +
    +PRINT i;
    +
    +FOR i IN 1..10 BY 2 LOOP
    + PRINT i;
    +END LOOP;
    +
    +PRINT i;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/if.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/if.sql b/hplsql/src/test/queries/local/if.sql
    new file mode 100644
    index 0000000..2de3045
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/if.sql
    @@ -0,0 +1,68 @@
    +DECLARE state VARCHAR;
    +DECLARE count INT;
    +
    +SET state = 'CA';
    +SET count = 1;
    +
    +/*IF count = 1 THEN
    + PRINT 'True block - Correct';
    +END IF;*/
    +
    +IF state = 'CA' THEN
    + PRINT 'True block - Correct';
    +ELSE
    + PRINT 'False block - Incorrect';
    +END IF;
    +
    +IF state = 'MA' THEN
    + PRINT 'True block - Incorrect';
    +ELSE
    + PRINT 'False block - Correct';
    +END IF;
    +
    +IF count = 4 THEN
    + PRINT 'True block - Incorrect';
    +ELSIF count = 3 THEN
    + PRINT 'True block - Incorrect';
    +ELSIF count = 2 THEN
    + PRINT 'True block - Incorrect';
    +ELSE
    + PRINT 'False block - Correct';
    +END IF;
    +
    +IF count = 3 THEN
    + PRINT 'True block - Incorrect';
    +ELSIF count = 2 THEN
    + PRINT 'True block - Incorrect';
    +ELSIF count = 1 THEN
    + PRINT 'True block - Correct';
    +ELSE
    + PRINT 'False block - Incorrect';
    +END IF;
    +
    +PRINT 'IS NOT NULL AND BETWEEN';
    +IF 1 IS NOT NULL AND 1 BETWEEN 0 AND 100 THEN
    + PRINT 'True block - Correct';
    +ELSE
    + PRINT 'False block - Incorrect';
    +END IF;
    +
    +PRINT 'Transact-SQL - Single statement';
    +
    +IF state = 'CA'
    + PRINT 'True block - Correct';
    +ELSE
    + PRINT 'False block - Incorrect';
    +
    +PRINT 'Transact-SQL - BEGIN-END block';
    +
    +IF state = 'CA'
    +BEGIN
    + PRINT 'True block - Correct';
    + PRINT 'True block - Correct';
    +END
    +ELSE
    +BEGIN
    + PRINT 'False block - Incorrect';
    + PRINT 'False block - Incorrect';
    +END
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/instr.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/instr.sql b/hplsql/src/test/queries/local/instr.sql
    new file mode 100644
    index 0000000..9cd8dca
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/instr.sql
    @@ -0,0 +1,49 @@
    +IF INSTR('abc', 'b') = 2 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +IF INSTR('abcabc', 'b', 3) = 5 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +IF INSTR('abcabcabc', 'b', 3, 2) = 8 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +IF INSTR('abcabcabc', 'b', -3) = 5 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +IF INSTR('abcabcabc', 'b', -3, 2) = 2 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +DECLARE c STRING;
    +
    +IF INSTR(c, 'b') IS NULL THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +IF INSTR(NULL, 'b') IS NULL THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    +
    +IF INSTR('', 'b') = 0 THEN
    + PRINT 'Correct';
    +ELSE
    + PRINT 'Failed';
    +END IF;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/interval.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/interval.sql b/hplsql/src/test/queries/local/interval.sql
    new file mode 100644
    index 0000000..7962f2d
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/interval.sql
    @@ -0,0 +1,15 @@
    +DATE '2015-03-12' + 1 DAY;
    +TIMESTAMP '2015-03-12' + 1 DAY;
    +TIMESTAMP '2015-03-12 10:10:10.000' + 1 MICROSECOND;
    +
    +DATE '2015-03-12' + NVL(NULL, 3) DAYS;
    +TIMESTAMP '2015-03-12' + NVL(NULL, 3) DAYS;
    +
    +DATE '2015-03-12' - 1 DAY;
    +TIMESTAMP '2015-03-12' - 1 DAY;
    +TIMESTAMP '2015-03-12 10:10:10.000' - 1 MICROSECOND;
    +
    +DATE '2015-03-12' - NVL(NULL, 3) DAYS;
    +TIMESTAMP '2015-03-12' - NVL(NULL, 3) DAYS;
    +
    +TIMESTAMP '2015-03-12' - 1 DAY - 1 MICROSECOND;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/lang.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/lang.sql b/hplsql/src/test/queries/local/lang.sql
    new file mode 100644
    index 0000000..56f8c33
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/lang.sql
    @@ -0,0 +1,57 @@
    +-- Integer literals
    ++1;
    +1;
    +0;
    +-1;
    +
    +-- Decimal literals
    +1.0;
    ++1.0;
    +-1.0;
    +-- 1.;
    +-- +1.;
    +-- -1.;
    +-- .1;
    +-- +.1;
    +-- -.1;
    +
    +-- Identifiers
    +declare abc int;
    +declare abc.abc int;
    +declare abc . abc1 int;
    +declare "abc" int;
    +declare "abc".abc int;
    +declare "abc"."abc" int;
    +declare "abc" . "abc1" int;
    +declare [abc] int;
    +declare [abc].abc int;
    +declare [abc].[abc] int;
    +declare [abc] . [abc1] int;
    +declare `abc` int;
    +declare `abc`.abc int;
    +declare `abc`.`abc` int;
    +declare `abc` . `abc1` int;
    +declare :new.abc int;
    +declare @abc int;
    +declare _abc int;
    +declare #abc int;
    +declare ##abc int;
    +declare $abc int;
    +declare abc_9 int;
    +
    +-- Operators and expressions
    ++1 + 1; -- 2
    +1 + 1; -- 2
    +1 + -1; -- 0
    +-- 'a' + 'b'; -- ab
    +-- 'a''b' + 'c'; -- ab''c
    +-- 'a\'b' + 'c'; -- ab\'c
    +-- 1 + '1' -- 2
    +-- '1' + 1 -- 2
    +-- 1 + 'a' -- 1a
    +-- 'a' + 1 -- a1
    +
    +-1 - 1; -- -2
    +-1 - -1; -- 0
    +
    +
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/leave.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/leave.sql b/hplsql/src/test/queries/local/leave.sql
    new file mode 100644
    index 0000000..a4fc0d5
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/leave.sql
    @@ -0,0 +1,33 @@
    +DECLARE count INT DEFAULT 3;
    +lbl:
    +WHILE 1=1 DO
    + PRINT 'Start of while block';
    + PRINT count;
    + SET count = count - 1;
    + IF count = 0 THEN
    + LEAVE lbl;
    + END IF;
    + PRINT 'End of while block';
    +END WHILE;
    +
    +SET count = 3;
    +
    +lbl3:
    +WHILE 1=1 DO
    + PRINT 'Start of outer while block';
    +
    + lbl1:
    + WHILE 1=1 DO
    + PRINT 'Start of 1st inner while block';
    + LEAVE lbl1;
    + PRINT 'End of 1st inner while block (NEVER SHOWN)';
    + END WHILE;
    +
    + lbl2:
    + WHILE 1=1 DO
    + PRINT 'Start of 2nd inner while block';
    + LEAVE lbl3;
    + PRINT 'End of 2nd inner while block (NEVER SHOWN)';
    + END WHILE;
    + PRINT 'End of outer while block (NEVER SHOWN)';
    +END WHILE;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/len.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/len.sql b/hplsql/src/test/queries/local/len.sql
    new file mode 100644
    index 0000000..9851c49
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/len.sql
    @@ -0,0 +1 @@
    +LEN('Abc ');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/length.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/length.sql b/hplsql/src/test/queries/local/length.sql
    new file mode 100644
    index 0000000..42cf3cc
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/length.sql
    @@ -0,0 +1 @@
    +LENGTH('Abc ');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/lower.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/lower.sql b/hplsql/src/test/queries/local/lower.sql
    new file mode 100644
    index 0000000..f29b0e9
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/lower.sql
    @@ -0,0 +1 @@
    +LOWER('ABC');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/nvl.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/nvl.sql b/hplsql/src/test/queries/local/nvl.sql
    new file mode 100644
    index 0000000..1a843bc
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/nvl.sql
    @@ -0,0 +1,4 @@
    +NVL('First non-null', 1);
    +NVL(NULL, 'First non-null');
    +NVL(NULL, 'First non-null', 1);
    +NVL(NULL, NULL, 'First non-null', 1);
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/nvl2.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/nvl2.sql b/hplsql/src/test/queries/local/nvl2.sql
    new file mode 100644
    index 0000000..70eeccb
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/nvl2.sql
    @@ -0,0 +1,2 @@
    +NVL2('A', 'Correct', 'FAILED');
    +NVL2(NULL, 'FAILED', 'Correct');

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/print.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/print.sql b/hplsql/src/test/queries/local/print.sql
    new file mode 100644
    index 0000000..095682b
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/print.sql
    @@ -0,0 +1,5 @@
    +PRINT 1;
    +PRINT 'abc';
    +PRINT ('abc');
    +
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/return.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/return.sql b/hplsql/src/test/queries/local/return.sql
    new file mode 100644
    index 0000000..c52e5c5
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/return.sql
    @@ -0,0 +1,3 @@
    +PRINT 'Before return';
    +RETURN;
    +PRINT 'Unreachable code';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/seterror.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/seterror.sql b/hplsql/src/test/queries/local/seterror.sql
    new file mode 100644
    index 0000000..4705677
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/seterror.sql
    @@ -0,0 +1,10 @@
    +BEGIN
    +SET plhql.onerror = SETERROR;
    +
    +HOST 'abcd';
    +IF HOSTCODE <> 0 THEN
    + PRINT 'Correct';
    +END IF;
    +EXCEPTION WHEN OTHERS THEN
    + PRINT 'FAILED';
    +END
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/sub.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/sub.sql b/hplsql/src/test/queries/local/sub.sql
    new file mode 100644
    index 0000000..a32bef7
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/sub.sql
    @@ -0,0 +1 @@
    +DATE '2015-01-01' - 1
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/substr.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/substr.sql b/hplsql/src/test/queries/local/substr.sql
    new file mode 100644
    index 0000000..7785e39
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/substr.sql
    @@ -0,0 +1,2 @@
    +SUBSTR('FAILED Correct', 8);
    +SUBSTR('FAILED Correct FAILED', 8, 7);
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/substring.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/substring.sql b/hplsql/src/test/queries/local/substring.sql
    new file mode 100644
    index 0000000..c94a191
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/substring.sql
    @@ -0,0 +1,8 @@
    +SUBSTRING('FAILED Correct', 8);
    +SUBSTRING('FAILED Correct FAILED', 8, 7);
    +
    +SUBSTRING('FAILED Correct' FROM 8);
    +SUBSTRING('FAILED Correct FAILED' FROM 8 FOR 7);
    +
    +SUBSTRING('', 8);
    +SUBSTRING(NULL, 8);

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/timestamp.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/timestamp.sql b/hplsql/src/test/queries/local/timestamp.sql
    new file mode 100644
    index 0000000..2971cea
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/timestamp.sql
    @@ -0,0 +1,4 @@
    +TIMESTAMP '2015-03-03 11:39:31.123456';
    +TIMESTAMP '2015-03-03 11:39:31.123';
    +TIMESTAMP '2015-03-03 11:39:31';
    +TIMESTAMP '2015-03-03-11.39.31.123';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/timestamp_iso.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/timestamp_iso.sql b/hplsql/src/test/queries/local/timestamp_iso.sql
    new file mode 100644
    index 0000000..9bcdfe0
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/timestamp_iso.sql
    @@ -0,0 +1,2 @@
    +TIMESTAMP_ISO('2015-03-12');
    +TIMESTAMP_ISO(DATE '2015-03-12');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/to_char.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/to_char.sql b/hplsql/src/test/queries/local/to_char.sql
    new file mode 100644
    index 0000000..339c7d6
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/to_char.sql
    @@ -0,0 +1 @@
    +TO_CHAR(DATE '2015-04-02')
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/to_timestamp.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/to_timestamp.sql b/hplsql/src/test/queries/local/to_timestamp.sql
    new file mode 100644
    index 0000000..c18f1f4
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/to_timestamp.sql
    @@ -0,0 +1,5 @@
    +TO_TIMESTAMP('2015-04-02', 'YYYY-MM-DD');
    +TO_TIMESTAMP('2015-04-02', 'yyyy-mm-dd');
    +TO_TIMESTAMP('04/02/2015', 'mm/dd/yyyy');
    +
    +TO_TIMESTAMP('2015-04-02 13:51:31', 'YYYY-MM-DD HH24:MI:SS');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/trim.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/trim.sql b/hplsql/src/test/queries/local/trim.sql
    new file mode 100644
    index 0000000..f8a2978
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/trim.sql
    @@ -0,0 +1 @@
    +'#' || TRIM(' Hello ') || '#';
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/twopipes.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/twopipes.sql b/hplsql/src/test/queries/local/twopipes.sql
    new file mode 100644
    index 0000000..c1d6f1d
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/twopipes.sql
    @@ -0,0 +1 @@
    +'a' || 'b' || 'c'
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/upper.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/upper.sql b/hplsql/src/test/queries/local/upper.sql
    new file mode 100644
    index 0000000..9b3b522
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/upper.sql
    @@ -0,0 +1 @@
    +UPPER('abc');
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/values_into.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/values_into.sql b/hplsql/src/test/queries/local/values_into.sql
    new file mode 100644
    index 0000000..e49894a
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/values_into.sql
    @@ -0,0 +1,6 @@
    +VALUES 'A' INTO code;
    +VALUES (0, 100) INTO (limit, count);
    +
    +PRINT code;
    +PRINT count;
    +PRINT limit;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/queries/local/while.sql
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/queries/local/while.sql b/hplsql/src/test/queries/local/while.sql
    new file mode 100644
    index 0000000..2dc4b54
    --- /dev/null
    +++ b/hplsql/src/test/queries/local/while.sql
    @@ -0,0 +1,20 @@
    +DECLARE count INT DEFAULT 7;
    +
    +WHILE count <> 0 LOOP
    + PRINT count;
    + count := count - 1;
    +END LOOP;
    +
    +SET count = 7;
    +
    +WHILE count <> 0 DO
    + PRINT count;
    + SET count = count - 1;
    +END WHILE;
    +
    +SET count = 7;
    +
    +WHILE count <> 0 BEGIN
    + PRINT count;
    + SET count = count - 1;
    +END
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/add.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/add.out.txt b/hplsql/src/test/results/local/add.out.txt
    new file mode 100644
    index 0000000..37a195b
    --- /dev/null
    +++ b/hplsql/src/test/results/local/add.out.txt
    @@ -0,0 +1,2 @@
    +2015-01-01
    +2015-01-01

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/assign.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/assign.out.txt b/hplsql/src/test/results/local/assign.out.txt
    new file mode 100644
    index 0000000..c01e270
    --- /dev/null
    +++ b/hplsql/src/test/results/local/assign.out.txt
    @@ -0,0 +1,8 @@
    +Ln:1 SET code = 'A'
    +Ln:2 SET status = 1
    +Ln:3 SET count = 0
    +Ln:5 SET code = 'A'
    +Ln:6 SET status = 1
    +Ln:6 SET count = 0
    +Ln:7 SET count = 0
    +Ln:7 SET limit = 100

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/bool_expr.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/bool_expr.out.txt b/hplsql/src/test/results/local/bool_expr.out.txt
    new file mode 100644
    index 0000000..514f324
    --- /dev/null
    +++ b/hplsql/src/test/results/local/bool_expr.out.txt
    @@ -0,0 +1,32 @@
    +Ln:1 IF
    +Ln:1 IF TRUE executed
    +Ln:2 PRINT
    +Correct
    +Ln:7 IF
    +Ln:7 IF TRUE executed
    +Ln:8 PRINT
    +Correct
    +Ln:13 IF
    +Ln:13 IF TRUE executed
    +Ln:14 PRINT
    +Correct
    +Ln:19 IF
    +Ln:19 ELSE executed
    +Ln:22 PRINT
    +Correct
    +Ln:25 IF
    +Ln:25 IF TRUE executed
    +Ln:26 PRINT
    +Correct
    +Ln:31 IF
    +Ln:31 IF TRUE executed
    +Ln:32 PRINT
    +Correct
    +Ln:37 IF
    +Ln:37 IF TRUE executed
    +Ln:38 PRINT
    +Correct
    +Ln:43 IF
    +Ln:43 ELSE executed
    +Ln:46 PRINT
    +Correct
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/break.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/break.out.txt b/hplsql/src/test/results/local/break.out.txt
    new file mode 100644
    index 0000000..cf907df
    --- /dev/null
    +++ b/hplsql/src/test/results/local/break.out.txt
    @@ -0,0 +1,29 @@
    +Ln:1 DECLARE count INT = 3
    +Ln:2 WHILE - ENTERED
    +Ln:3 PRINT
    +Start of while block
    +Ln:4 PRINT
    +3
    +Ln:5 SET count = 2
    +Ln:6 IF
    +Ln:8 PRINT
    +End of while block
    +Ln:3 PRINT
    +Start of while block
    +Ln:4 PRINT
    +2
    +Ln:5 SET count = 1
    +Ln:6 IF
    +Ln:8 PRINT
    +End of while block
    +Ln:3 PRINT
    +Start of while block
    +Ln:4 PRINT
    +1
    +Ln:5 SET count = 0
    +Ln:6 IF
    +Ln:6 IF TRUE executed
    +Ln:7 BREAK
    +Ln:2 WHILE - LEFT
    +Ln:10 PRINT
    +End of script
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/case.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/case.out.txt b/hplsql/src/test/results/local/case.out.txt
    new file mode 100644
    index 0000000..6062a1f
    --- /dev/null
    +++ b/hplsql/src/test/results/local/case.out.txt
    @@ -0,0 +1,12 @@
    +Ln:1 PRINT
    +Correct
    +Ln:8 PRINT
    +Correct
    +Ln:14 PRINT
    +Correct
    +Ln:19 PRINT
    +Correct
    +Ln:26 PRINT
    +Correct
    +Ln:32 PRINT
    +Correct

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/cast.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/cast.out.txt b/hplsql/src/test/results/local/cast.out.txt
    new file mode 100644
    index 0000000..f3de493
    --- /dev/null
    +++ b/hplsql/src/test/results/local/cast.out.txt
    @@ -0,0 +1,8 @@
    +Ln:1 FUNC CAST
    +A
    +Ln:2 FUNC CAST
    +Ab
    +Ln:3 FUNC CAST
    +Abc
    +Ln:4 FUNC CAST
    +2015-03-12

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/char.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/char.out.txt b/hplsql/src/test/results/local/char.out.txt
    new file mode 100644
    index 0000000..83b33d2
    --- /dev/null
    +++ b/hplsql/src/test/results/local/char.out.txt
    @@ -0,0 +1 @@
    +1000

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/coalesce.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/coalesce.out.txt b/hplsql/src/test/results/local/coalesce.out.txt
    new file mode 100644
    index 0000000..a111c85
    --- /dev/null
    +++ b/hplsql/src/test/results/local/coalesce.out.txt
    @@ -0,0 +1,4 @@
    +First non-null
    +First non-null
    +First non-null
    +First non-null

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/concat.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/concat.out.txt b/hplsql/src/test/results/local/concat.out.txt
    new file mode 100644
    index 0000000..cdddd69
    --- /dev/null
    +++ b/hplsql/src/test/results/local/concat.out.txt
    @@ -0,0 +1,2 @@
    +abc
    +NULL Value

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/create_function.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/create_function.out.txt b/hplsql/src/test/results/local/create_function.out.txt
    new file mode 100644
    index 0000000..b996ab4
    --- /dev/null
    +++ b/hplsql/src/test/results/local/create_function.out.txt
    @@ -0,0 +1,9 @@
    +Ln:1 CREATE FUNCTION hello
    +Ln:10 PRINT
    +Ln:10 EXEC FUNCTION hello
    +Ln:4 PRINT
    +Start
    +Ln:5 RETURN
    +Hello, world!
    +Ln:11 PRINT
    +End of script

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/create_function2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/create_function2.out.txt b/hplsql/src/test/results/local/create_function2.out.txt
    new file mode 100644
    index 0000000..c8fc993
    --- /dev/null
    +++ b/hplsql/src/test/results/local/create_function2.out.txt
    @@ -0,0 +1,10 @@
    +Ln:1 CREATE FUNCTION hello2
    +Ln:10 PRINT
    +Ln:10 EXEC FUNCTION hello2
    +Ln:10 SET PARAM text = world
    +Ln:4 PRINT
    +Start
    +Ln:5 RETURN
    +Hello, world!
    +Ln:11 PRINT
    +End of script

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/create_procedure.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/create_procedure.out.txt b/hplsql/src/test/results/local/create_procedure.out.txt
    new file mode 100644
    index 0000000..1f86916
    --- /dev/null
    +++ b/hplsql/src/test/results/local/create_procedure.out.txt
    @@ -0,0 +1,8 @@
    +Ln:1 CREATE PROCEDURE set_message
    +Ln:6 DECLARE str STRING
    +Ln:7 EXEC PROCEDURE set_message
    +Ln:7 SET PARAM name = world
    +Ln:7 SET PARAM result = null
    +Ln:3 SET result = 'Hello, world!'
    +Ln:8 PRINT
    +Hello, world!
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/date.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/date.out.txt b/hplsql/src/test/results/local/date.out.txt
    new file mode 100644
    index 0000000..118bd29
    --- /dev/null
    +++ b/hplsql/src/test/results/local/date.out.txt
    @@ -0,0 +1,4 @@
    +2014-12-20
    +2015-03-12
    +2015-03-12
    +2015-03-12

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/dbms_output.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/dbms_output.out.txt b/hplsql/src/test/results/local/dbms_output.out.txt
    new file mode 100644
    index 0000000..b6ed0e0
    --- /dev/null
    +++ b/hplsql/src/test/results/local/dbms_output.out.txt
    @@ -0,0 +1,3 @@
    +Ln:2 DECLARE str VARCHAR = 'Hello, world!'
    +Hello, world!
    +Hello, world!

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/declare.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/declare.out.txt b/hplsql/src/test/results/local/declare.out.txt
    new file mode 100644
    index 0000000..2b7794e
    --- /dev/null
    +++ b/hplsql/src/test/results/local/declare.out.txt
    @@ -0,0 +1,13 @@
    +Ln:2 DECLARE code CHAR
    +Ln:3 DECLARE status INT = 1
    +Ln:4 DECLARE count SMALLINT = NULL
    +Ln:5 DECLARE limit INT = 100
    +Ln:6 DECLARE f UTL_FILE.FILE_TYPE
    +Ln:8 SET status = 2
    +Ln:11 DECLARE code CHAR
    +Ln:12 DECLARE status INT = 1
    +Ln:12 DECLARE status2 INT = 1
    +Ln:13 DECLARE count SMALLINT
    +Ln:13 DECLARE limit INT = 100
    +Ln:15 DECLARE dt DATE = 2015-05-13
    +Ln:16 DECLARE ts TIMESTAMP = 2015-05-13 11:10:01

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/declare_condition.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/declare_condition.out.txt b/hplsql/src/test/results/local/declare_condition.out.txt
    new file mode 100644
    index 0000000..4633c8d
    --- /dev/null
    +++ b/hplsql/src/test/results/local/declare_condition.out.txt
    @@ -0,0 +1,7 @@
    +Ln:2 DECLARE HANDLER
    +Ln:4 IF
    +Ln:4 IF TRUE executed
    +Ln:5 SIGNAL
    +Ln:2 EXIT HANDLER
    +Ln:3 PRINT
    +Condition raised
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/declare_condition2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/declare_condition2.out.txt b/hplsql/src/test/results/local/declare_condition2.out.txt
    new file mode 100644
    index 0000000..67da39d
    --- /dev/null
    +++ b/hplsql/src/test/results/local/declare_condition2.out.txt
    @@ -0,0 +1,12 @@
    +Ln:2 DECLARE HANDLER
    +Ln:4 DECLARE HANDLER
    +Ln:6 IF
    +Ln:6 IF TRUE executed
    +Ln:7 SIGNAL
    +Ln:4 CONTINUE HANDLER
    +Ln:5 PRINT
    +Condition raised
    +Ln:9 PRINT
    +Executed 1
    +Ln:10 PRINT
    +Executed 2
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/decode.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/decode.out.txt b/hplsql/src/test/results/local/decode.out.txt
    new file mode 100644
    index 0000000..39b01bc
    --- /dev/null
    +++ b/hplsql/src/test/results/local/decode.out.txt
    @@ -0,0 +1,13 @@
    +Ln:1 DECLARE var1 INT = 3
    +Ln:2 PRINT
    +C
    +Ln:3 PRINT
    +C
    +Ln:5 SET var1 = 1
    +Ln:6 PRINT
    +A
    +Ln:8 SET var1 = NULL
    +Ln:9 PRINT
    +C
    +Ln:10 PRINT
    +C

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/equal.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/equal.out.txt b/hplsql/src/test/results/local/equal.out.txt
    new file mode 100644
    index 0000000..9c02e38
    --- /dev/null
    +++ b/hplsql/src/test/results/local/equal.out.txt
    @@ -0,0 +1,48 @@
    +Ln:1 PRINT
    +Case 1 = 1
    +Ln:2 IF
    +Ln:2 IF TRUE executed
    +Ln:3 PRINT
    +Equal - Correct
    +Ln:8 PRINT
    +Case 1 == 1
    +Ln:9 IF
    +Ln:9 IF TRUE executed
    +Ln:10 PRINT
    +Equal - Correct
    +Ln:15 PRINT
    +Case 1 <> 3
    +Ln:16 IF
    +Ln:16 IF TRUE executed
    +Ln:17 PRINT
    +Not equal - Correct
    +Ln:22 PRINT
    +Case 1 != 3
    +Ln:23 IF
    +Ln:23 IF TRUE executed
    +Ln:24 PRINT
    +Not equal - Correct
    +Ln:29 PRINT
    +Case 3 > 1
    +Ln:30 IF
    +Ln:30 IF TRUE executed
    +Ln:31 PRINT
    +Greater - Correct
    +Ln:36 PRINT
    +Case 1 < 3
    +Ln:37 IF
    +Ln:37 IF TRUE executed
    +Ln:38 PRINT
    +Less - Correct
    +Ln:43 PRINT
    +Case 3 >= 1
    +Ln:44 IF
    +Ln:44 IF TRUE executed
    +Ln:45 PRINT
    +Greater or equal - Correct
    +Ln:50 PRINT
    +Case 1 <= 3
    +Ln:51 IF
    +Ln:51 IF TRUE executed
    +Ln:52 PRINT
    +Less or equal - Correct
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/exception.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/exception.out.txt b/hplsql/src/test/results/local/exception.out.txt
    new file mode 100644
    index 0000000..5de7998
    --- /dev/null
    +++ b/hplsql/src/test/results/local/exception.out.txt
    @@ -0,0 +1,13 @@
    +Ln:2 PRINT
    +Correct
    +Ln:3 WHILE - ENTERED
    +Ln:4 FETCH
    +Ln:4 Cursor not found: cur
    +Ln:3 WHILE - LEFT
    +Ln:7 EXCEPTION HANDLER
    +Ln:8 PRINT
    +Correct
    +Ln:9 PRINT
    +Correct
    +Ln:10 PRINT
    +Correct - Exception raised
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/exception2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/exception2.out.txt b/hplsql/src/test/results/local/exception2.out.txt
    new file mode 100644
    index 0000000..f56a326
    --- /dev/null
    +++ b/hplsql/src/test/results/local/exception2.out.txt
    @@ -0,0 +1,5 @@
    +Ln:2 DECLARE v VARCHAR
    +Ln:4 OPEN
    +Ln:4 cur: SELECT c1 FROM t1
    +Ln:7 EXCEPTION HANDLER
    +Error

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/exit.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/exit.out.txt b/hplsql/src/test/results/local/exit.out.txt
    new file mode 100644
    index 0000000..0352275
    --- /dev/null
    +++ b/hplsql/src/test/results/local/exit.out.txt
    @@ -0,0 +1,42 @@
    +Ln:1 DECLARE count INT = 3
    +Ln:3 WHILE - ENTERED
    +Ln:4 PRINT
    +Start of while block
    +Ln:5 PRINT
    +3
    +Ln:6 SET count = 2
    +Ln:7 EXIT
    +Ln:8 PRINT
    +End of while block
    +Ln:4 PRINT
    +Start of while block
    +Ln:5 PRINT
    +2
    +Ln:6 SET count = 1
    +Ln:7 EXIT
    +Ln:8 PRINT
    +End of while block
    +Ln:4 PRINT
    +Start of while block
    +Ln:5 PRINT
    +1
    +Ln:6 SET count = 0
    +Ln:7 EXIT
    +Ln:3 WHILE - LEFT
    +Ln:11 SET count = 3
    +Ln:14 WHILE - ENTERED
    +Ln:15 PRINT
    +Start of outer while block
    +Ln:17 WHILE - ENTERED
    +Ln:18 PRINT
    +Start of 1st inner while block
    +Ln:19 EXIT
    +Ln:17 WHILE - LEFT
    +Ln:24 WHILE - ENTERED
    +Ln:25 PRINT
    +Start of 2nd inner while block
    +Ln:26 EXIT
    +Ln:24 WHILE - LEFT
    +Ln:14 WHILE - LEFT
    +Ln:31 PRINT
    +End of script

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/expr.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/expr.out.txt b/hplsql/src/test/results/local/expr.out.txt
    new file mode 100644
    index 0000000..a5377e3
    --- /dev/null
    +++ b/hplsql/src/test/results/local/expr.out.txt
    @@ -0,0 +1,29 @@
    +Ln:1 PRINT
    +ab
    +Ln:2 PRINT
    +a1b
    +Ln:3 PRINT
    +1ab
    +Ln:4 PRINT
    +ab
    +Ln:5 PRINT
    +ab
    +Ln:6 PRINT
    +null
    +Ln:8 DECLARE c INT
    +Ln:10 PRINT
    +Integer increment
    +Ln:11 SET c = 3
    +Ln:12 SET c = 4
    +Ln:13 PRINT
    +4
    +Ln:15 PRINT
    +Integer decrement
    +Ln:16 SET c = 3
    +Ln:17 SET c = 2
    +Ln:18 PRINT
    +2
    +Ln:20 PRINT
    +Correct
    +Ln:21 PRINT
    +Correct
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/for_range.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/for_range.out.txt b/hplsql/src/test/results/local/for_range.out.txt
    new file mode 100644
    index 0000000..dc29c95
    --- /dev/null
    +++ b/hplsql/src/test/results/local/for_range.out.txt
    @@ -0,0 +1,65 @@
    +Ln:1 DECLARE i INT = 3
    +Ln:2 PRINT
    +3
    +Ln:4 FOR RANGE - ENTERED
    +Ln:5 PRINT
    +1
    +Ln:5 PRINT
    +2
    +Ln:5 PRINT
    +3
    +Ln:5 PRINT
    +4
    +Ln:5 PRINT
    +5
    +Ln:5 PRINT
    +6
    +Ln:5 PRINT
    +7
    +Ln:5 PRINT
    +8
    +Ln:5 PRINT
    +9
    +Ln:5 PRINT
    +10
    +Ln:4 FOR RANGE - LEFT
    +Ln:8 PRINT
    +3
    +Ln:10 FOR RANGE - ENTERED
    +Ln:11 PRINT
    +10
    +Ln:11 PRINT
    +9
    +Ln:11 PRINT
    +8
    +Ln:11 PRINT
    +7
    +Ln:11 PRINT
    +6
    +Ln:11 PRINT
    +5
    +Ln:11 PRINT
    +4
    +Ln:11 PRINT
    +3
    +Ln:11 PRINT
    +2
    +Ln:11 PRINT
    +1
    +Ln:10 FOR RANGE - LEFT
    +Ln:14 PRINT
    +3
    +Ln:16 FOR RANGE - ENTERED
    +Ln:17 PRINT
    +1
    +Ln:17 PRINT
    +3
    +Ln:17 PRINT
    +5
    +Ln:17 PRINT
    +7
    +Ln:17 PRINT
    +9
    +Ln:16 FOR RANGE - LEFT
    +Ln:20 PRINT
    +3

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/if.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/if.out.txt b/hplsql/src/test/results/local/if.out.txt
    new file mode 100644
    index 0000000..1da8142
    --- /dev/null
    +++ b/hplsql/src/test/results/local/if.out.txt
    @@ -0,0 +1,40 @@
    +Ln:1 DECLARE state VARCHAR
    +Ln:2 DECLARE count INT
    +Ln:4 SET state = 'CA'
    +Ln:5 SET count = 1
    +Ln:11 IF
    +Ln:11 IF TRUE executed
    +Ln:12 PRINT
    +True block - Correct
    +Ln:17 IF
    +Ln:17 ELSE executed
    +Ln:20 PRINT
    +False block - Correct
    +Ln:23 IF
    +Ln:23 ELSE executed
    +Ln:30 PRINT
    +False block - Correct
    +Ln:33 IF
    +Ln:33 ELSE IF executed
    +Ln:38 PRINT
    +True block - Correct
    +Ln:43 PRINT
    +IS NOT NULL AND BETWEEN
    +Ln:44 IF
    +Ln:44 IF TRUE executed
    +Ln:45 PRINT
    +True block - Correct
    +Ln:50 PRINT
    +Transact-SQL - Single statement
    +Ln:52 IF
    +Ln:52 IF TRUE executed
    +Ln:53 PRINT
    +True block - Correct
    +Ln:57 PRINT
    +Transact-SQL - BEGIN-END block
    +Ln:59 IF
    +Ln:59 IF TRUE executed
    +Ln:61 PRINT
    +True block - Correct
    +Ln:62 PRINT
    +True block - Correct
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/instr.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/instr.out.txt b/hplsql/src/test/results/local/instr.out.txt
    new file mode 100644
    index 0000000..a1ed71f
    --- /dev/null
    +++ b/hplsql/src/test/results/local/instr.out.txt
    @@ -0,0 +1,33 @@
    +Ln:1 IF
    +Ln:1 IF TRUE executed
    +Ln:2 PRINT
    +Correct
    +Ln:7 IF
    +Ln:7 IF TRUE executed
    +Ln:8 PRINT
    +Correct
    +Ln:13 IF
    +Ln:13 IF TRUE executed
    +Ln:14 PRINT
    +Correct
    +Ln:19 IF
    +Ln:19 IF TRUE executed
    +Ln:20 PRINT
    +Correct
    +Ln:25 IF
    +Ln:25 IF TRUE executed
    +Ln:26 PRINT
    +Correct
    +Ln:31 DECLARE c STRING
    +Ln:33 IF
    +Ln:33 IF TRUE executed
    +Ln:34 PRINT
    +Correct
    +Ln:39 IF
    +Ln:39 IF TRUE executed
    +Ln:40 PRINT
    +Correct
    +Ln:45 IF
    +Ln:45 IF TRUE executed
    +Ln:46 PRINT
    +Correct
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/interval.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/interval.out.txt b/hplsql/src/test/results/local/interval.out.txt
    new file mode 100644
    index 0000000..2dcdcd5
    --- /dev/null
    +++ b/hplsql/src/test/results/local/interval.out.txt
    @@ -0,0 +1,11 @@
    +2015-03-13
    +2015-03-13 00:00:00
    +2015-03-12 10:10:10.001
    +2015-03-15
    +2015-03-15 00:00:00
    +2015-03-11
    +2015-03-11 00:00:00
    +2015-03-12 10:10:09.999
    +2015-03-09
    +2015-03-09 00:00:00
    +2015-03-10 23:59:59

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/lang.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/lang.out.txt b/hplsql/src/test/results/local/lang.out.txt
    new file mode 100644
    index 0000000..0047ec4
    --- /dev/null
    +++ b/hplsql/src/test/results/local/lang.out.txt
    @@ -0,0 +1,34 @@
    +1
    +1
    +0
    +-1
    +1.0
    +1.0
    +-1.0
    +Ln:19 DECLARE abc int
    +Ln:20 DECLARE abc.abc int
    +Ln:21 DECLARE abc . abc1 int
    +Ln:22 DECLARE "abc" int
    +Ln:23 DECLARE "abc".abc int
    +Ln:24 DECLARE "abc"."abc" int
    +Ln:25 DECLARE "abc" . "abc1" int
    +Ln:26 DECLARE [abc] int
    +Ln:27 DECLARE [abc].abc int
    +Ln:28 DECLARE [abc].[abc] int
    +Ln:29 DECLARE [abc] . [abc1] int
    +Ln:30 DECLARE `abc` int
    +Ln:31 DECLARE `abc`.abc int
    +Ln:32 DECLARE `abc`.`abc` int
    +Ln:33 DECLARE `abc` . `abc1` int
    +Ln:34 DECLARE :new.abc int
    +Ln:35 DECLARE @abc int
    +Ln:36 DECLARE _abc int
    +Ln:37 DECLARE #abc int
    +Ln:38 DECLARE ##abc int
    +Ln:39 DECLARE $abc int
    +Ln:40 DECLARE abc_9 int
    +2
    +2
    +0
    +-2
    +0

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/leave.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/leave.out.txt b/hplsql/src/test/results/local/leave.out.txt
    new file mode 100644
    index 0000000..8e57245
    --- /dev/null
    +++ b/hplsql/src/test/results/local/leave.out.txt
    @@ -0,0 +1,42 @@
    +Ln:1 DECLARE count INT = 3
    +Ln:3 WHILE - ENTERED
    +Ln:4 PRINT
    +Start of while block
    +Ln:5 PRINT
    +3
    +Ln:6 SET count = 2
    +Ln:7 IF
    +Ln:10 PRINT
    +End of while block
    +Ln:4 PRINT
    +Start of while block
    +Ln:5 PRINT
    +2
    +Ln:6 SET count = 1
    +Ln:7 IF
    +Ln:10 PRINT
    +End of while block
    +Ln:4 PRINT
    +Start of while block
    +Ln:5 PRINT
    +1
    +Ln:6 SET count = 0
    +Ln:7 IF
    +Ln:7 IF TRUE executed
    +Ln:8 LEAVE
    +Ln:3 WHILE - LEFT
    +Ln:13 SET count = 3
    +Ln:16 WHILE - ENTERED
    +Ln:17 PRINT
    +Start of outer while block
    +Ln:20 WHILE - ENTERED
    +Ln:21 PRINT
    +Start of 1st inner while block
    +Ln:22 LEAVE
    +Ln:20 WHILE - LEFT
    +Ln:27 WHILE - ENTERED
    +Ln:28 PRINT
    +Start of 2nd inner while block
    +Ln:29 LEAVE
    +Ln:27 WHILE - LEFT
    +Ln:16 WHILE - LEFT
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/len.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/len.out.txt b/hplsql/src/test/results/local/len.out.txt
    new file mode 100644
    index 0000000..00750ed
    --- /dev/null
    +++ b/hplsql/src/test/results/local/len.out.txt
    @@ -0,0 +1 @@
    +3

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/length.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/length.out.txt b/hplsql/src/test/results/local/length.out.txt
    new file mode 100644
    index 0000000..b8626c4
    --- /dev/null
    +++ b/hplsql/src/test/results/local/length.out.txt
    @@ -0,0 +1 @@
    +4

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/lower.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/lower.out.txt b/hplsql/src/test/results/local/lower.out.txt
    new file mode 100644
    index 0000000..8baef1b
    --- /dev/null
    +++ b/hplsql/src/test/results/local/lower.out.txt
    @@ -0,0 +1 @@
    +abc

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/nvl.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/nvl.out.txt b/hplsql/src/test/results/local/nvl.out.txt
    new file mode 100644
    index 0000000..a111c85
    --- /dev/null
    +++ b/hplsql/src/test/results/local/nvl.out.txt
    @@ -0,0 +1,4 @@
    +First non-null
    +First non-null
    +First non-null
    +First non-null

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/nvl2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/nvl2.out.txt b/hplsql/src/test/results/local/nvl2.out.txt
    new file mode 100644
    index 0000000..09acc48
    --- /dev/null
    +++ b/hplsql/src/test/results/local/nvl2.out.txt
    @@ -0,0 +1,2 @@
    +Correct
    +Correct

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/plhqlexception.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/plhqlexception.out.txt b/hplsql/src/test/results/local/plhqlexception.out.txt
    new file mode 100644
    index 0000000..439cbbb
    --- /dev/null
    +++ b/hplsql/src/test/results/local/plhqlexception.out.txt
    @@ -0,0 +1,6 @@
    +Ln:1 PRINT
    +Correct
    +Ln:2 WHILE - ENTERED
    +Ln:3 FETCH
    +Ln:3 Cursor not found: cur
    +Ln:2 WHILE - LEFT
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/plhqlexception1.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/plhqlexception1.out.txt b/hplsql/src/test/results/local/plhqlexception1.out.txt
    new file mode 100644
    index 0000000..9b88f62
    --- /dev/null
    +++ b/hplsql/src/test/results/local/plhqlexception1.out.txt
    @@ -0,0 +1,10 @@
    +Ln:1 PRINT
    +Correct
    +Ln:2 DECLARE HANDLER
    +Ln:4 WHILE - ENTERED
    +Ln:5 FETCH
    +Ln:5 Cursor not found: cur
    +Ln:4 WHILE - LEFT
    +Ln:2 EXIT HANDLER
    +Ln:3 PRINT
    +Correct - Exception raised

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/plhqlexception2.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/plhqlexception2.out.txt b/hplsql/src/test/results/local/plhqlexception2.out.txt
    new file mode 100644
    index 0000000..74de5b2
    --- /dev/null
    +++ b/hplsql/src/test/results/local/plhqlexception2.out.txt
    @@ -0,0 +1,106 @@
    +Ln:1 DECLARE cnt INT = 0
    +Ln:2 PRINT
    +Correct
    +Ln:3 DECLARE HANDLER
    +Ln:5 WHILE - ENTERED
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +0
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 1
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +1
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 2
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +2
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 3
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +3
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 4
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +4
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 5
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +5
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 6
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +6
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 7
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +7
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 8
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +8
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 9
    +Ln:6 FETCH
    +Ln:6 Cursor not found: cur
    +Ln:3 CONTINUE HANDLER
    +Ln:4 PRINT
    +Correct - Exception raised
    +Ln:7 PRINT
    +9
    +Ln:8 PRINT
    +Correct - exception handled
    +Ln:9 SET cnt = 10
    +Ln:5 WHILE - LEFT

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/print.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/print.out.txt b/hplsql/src/test/results/local/print.out.txt
    new file mode 100644
    index 0000000..65a1016
    --- /dev/null
    +++ b/hplsql/src/test/results/local/print.out.txt
    @@ -0,0 +1,6 @@
    +Ln:1 PRINT
    +1
    +Ln:2 PRINT
    +abc
    +Ln:3 PRINT
    +abc
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/return.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/return.out.txt b/hplsql/src/test/results/local/return.out.txt
    new file mode 100644
    index 0000000..f4d5bc9
    --- /dev/null
    +++ b/hplsql/src/test/results/local/return.out.txt
    @@ -0,0 +1,3 @@
    +Ln:1 PRINT
    +Before return
    +Ln:2 RETURN
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/select_conversion.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/select_conversion.out.txt b/hplsql/src/test/results/local/select_conversion.out.txt
    new file mode 100644
    index 0000000..602304e
    --- /dev/null
    +++ b/hplsql/src/test/results/local/select_conversion.out.txt
    @@ -0,0 +1,9 @@
    +Ln:1 DECLARE v1 STRING = abc
    +Ln:3 SELECT
    +Ln:3 Statement:
    +SELECT CONCAT('a', 'b', 'c'), CONCAT('a', 'b') FROM default.dual
    +Ln:3 Not executed - offline mode set
    +Ln:5 SELECT
    +Ln:5 Statement:
    +SELECT 'abc' AS c1, CONCAT('abc', 'abc'), NVL(NVL(CONCAT('abc', NVL(id, 1), id), 1), 1), 'abc', 'abc' AS c4 FROM default.dual t1 WHERE 'abc' = 'abc' AND (NVL(NVL('abc', 1), 1) = 'abc' or 'abc' = 'abc')
    +Ln:5 Not executed - offline mode set
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/seterror.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/seterror.out.txt b/hplsql/src/test/results/local/seterror.out.txt
    new file mode 100644
    index 0000000..3c093cc
    --- /dev/null
    +++ b/hplsql/src/test/results/local/seterror.out.txt
    @@ -0,0 +1,6 @@
    +Ln:2 SET plhql.onerror = NULL
    +Ln:4 HOST
    +Ln:4 HOST Command: abcd
    +Ln:8 EXCEPTION HANDLER
    +Ln:9 PRINT
    +FAILED
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/sub.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/sub.out.txt b/hplsql/src/test/results/local/sub.out.txt
    new file mode 100644
    index 0000000..c35d146
    --- /dev/null
    +++ b/hplsql/src/test/results/local/sub.out.txt
    @@ -0,0 +1 @@
    +2014-12-31
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/substr.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/substr.out.txt b/hplsql/src/test/results/local/substr.out.txt
    new file mode 100644
    index 0000000..09acc48
    --- /dev/null
    +++ b/hplsql/src/test/results/local/substr.out.txt
    @@ -0,0 +1,2 @@
    +Correct
    +Correct

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/substring.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/substring.out.txt b/hplsql/src/test/results/local/substring.out.txt
    new file mode 100644
    index 0000000..820d65a
    --- /dev/null
    +++ b/hplsql/src/test/results/local/substring.out.txt
    @@ -0,0 +1,8 @@
    +Correct
    +Correct
    +Ln:4 FUNC SUBSTRING
    +Correct
    +Ln:5 FUNC SUBSTRING
    +Correct
    +
    +null
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/timestamp.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/timestamp.out.txt b/hplsql/src/test/results/local/timestamp.out.txt
    new file mode 100644
    index 0000000..bb58d46
    --- /dev/null
    +++ b/hplsql/src/test/results/local/timestamp.out.txt
    @@ -0,0 +1,4 @@
    +2015-03-03 11:39:31.123
    +2015-03-03 11:39:31.123
    +2015-03-03 11:39:31
    +2015-03-03 11:39:31.123
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/timestamp_iso.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/timestamp_iso.out.txt b/hplsql/src/test/results/local/timestamp_iso.out.txt
    new file mode 100644
    index 0000000..997df7f
    --- /dev/null
    +++ b/hplsql/src/test/results/local/timestamp_iso.out.txt
    @@ -0,0 +1,2 @@
    +2015-03-12 00:00:00
    +2015-03-12 00:00:00

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/to_char.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/to_char.out.txt b/hplsql/src/test/results/local/to_char.out.txt
    new file mode 100644
    index 0000000..22e8cef
    --- /dev/null
    +++ b/hplsql/src/test/results/local/to_char.out.txt
    @@ -0,0 +1 @@
    +2015-04-02

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/to_timestamp.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/to_timestamp.out.txt b/hplsql/src/test/results/local/to_timestamp.out.txt
    new file mode 100644
    index 0000000..1ee7278
    --- /dev/null
    +++ b/hplsql/src/test/results/local/to_timestamp.out.txt
    @@ -0,0 +1,4 @@
    +2015-04-02 00:00:00
    +2015-04-02 00:00:00
    +2015-04-02 00:00:00
    +2015-04-02 13:51:31

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/trim.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/trim.out.txt b/hplsql/src/test/results/local/trim.out.txt
    new file mode 100644
    index 0000000..bbf851d
    --- /dev/null
    +++ b/hplsql/src/test/results/local/trim.out.txt
    @@ -0,0 +1 @@
    +#Hello#

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/twopipes.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/twopipes.out.txt b/hplsql/src/test/results/local/twopipes.out.txt
    new file mode 100644
    index 0000000..f2ba8f8
    --- /dev/null
    +++ b/hplsql/src/test/results/local/twopipes.out.txt
    @@ -0,0 +1 @@
    +abc
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/upper.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/upper.out.txt b/hplsql/src/test/results/local/upper.out.txt
    new file mode 100644
    index 0000000..5da849b
    --- /dev/null
    +++ b/hplsql/src/test/results/local/upper.out.txt
    @@ -0,0 +1 @@
    +ABC

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/values_into.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/values_into.out.txt b/hplsql/src/test/results/local/values_into.out.txt
    new file mode 100644
    index 0000000..d698e88
    --- /dev/null
    +++ b/hplsql/src/test/results/local/values_into.out.txt
    @@ -0,0 +1,11 @@
    +Ln:1 VALUES statement
    +Ln:1 SET code = A
    +Ln:2 VALUES statement
    +Ln:2 SET limit = 0
    +Ln:2 SET count = 100
    +Ln:4 PRINT
    +A
    +Ln:5 PRINT
    +100
    +Ln:6 PRINT
    +0
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/7338d8e1/hplsql/src/test/results/local/while.out.txt
    ----------------------------------------------------------------------
    diff --git a/hplsql/src/test/results/local/while.out.txt b/hplsql/src/test/results/local/while.out.txt
    new file mode 100644
    index 0000000..4a3ca0e
    --- /dev/null
    +++ b/hplsql/src/test/results/local/while.out.txt
    @@ -0,0 +1,72 @@
    +Ln:1 DECLARE count INT = 7
    +Ln:3 WHILE - ENTERED
    +Ln:4 PRINT
    +7
    +Ln:5 SET count = 6
    +Ln:4 PRINT
    +6
    +Ln:5 SET count = 5
    +Ln:4 PRINT
    +5
    +Ln:5 SET count = 4
    +Ln:4 PRINT
    +4
    +Ln:5 SET count = 3
    +Ln:4 PRINT
    +3
    +Ln:5 SET count = 2
    +Ln:4 PRINT
    +2
    +Ln:5 SET count = 1
    +Ln:4 PRINT
    +1
    +Ln:5 SET count = 0
    +Ln:3 WHILE - LEFT
    +Ln:8 SET count = 7
    +Ln:10 WHILE - ENTERED
    +Ln:11 PRINT
    +7
    +Ln:12 SET count = 6
    +Ln:11 PRINT
    +6
    +Ln:12 SET count = 5
    +Ln:11 PRINT
    +5
    +Ln:12 SET count = 4
    +Ln:11 PRINT
    +4
    +Ln:12 SET count = 3
    +Ln:11 PRINT
    +3
    +Ln:12 SET count = 2
    +Ln:11 PRINT
    +2
    +Ln:12 SET count = 1
    +Ln:11 PRINT
    +1
    +Ln:12 SET count = 0
    +Ln:10 WHILE - LEFT
    +Ln:15 SET count = 7
    +Ln:17 WHILE - ENTERED
    +Ln:18 PRINT
    +7
    +Ln:19 SET count = 6
    +Ln:18 PRINT
    +6
    +Ln:19 SET count = 5
    +Ln:18 PRINT
    +5
    +Ln:19 SET count = 4
    +Ln:18 PRINT
    +4
    +Ln:19 SET count = 3
    +Ln:18 PRINT
    +3
    +Ln:19 SET count = 2
    +Ln:18 PRINT
    +2
    +Ln:19 SET count = 1
    +Ln:18 PRINT
    +1
    +Ln:19 SET count = 0
    +Ln:17 WHILE - LEFT
    \ No newline at end of file
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11279 : Hive should emit lineage information in json compact format (Lenni Kuff via Szehon)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2b1f03e3
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2b1f03e3
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2b1f03e3

    Branch: refs/heads/spark
    Commit: 2b1f03e33d2bbfc6916e80caff963475c2740f82
    Parents: 7788968
    Author: Szehon Ho <szehon@cloudera.com>
    Authored: Fri Jul 17 11:19:25 2015 -0700
    Committer: Szehon Ho <szehon@cloudera.com>
    Committed: Fri Jul 17 11:19:55 2015 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/ql/hooks/LineageLogger.java | 4 +-
      .../test/results/clientpositive/lineage2.q.out | 2296 +-----------------
      .../test/results/clientpositive/lineage3.q.out | 2235 +----------------
      3 files changed, 58 insertions(+), 4477 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/2b1f03e3/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    index fc32af7..d615372 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    @@ -137,9 +137,7 @@ public class LineageLogger implements ExecuteWithHookContext {
            try {
              StringBuilderWriter out = new StringBuilderWriter(1024);
              JsonWriter writer = new JsonWriter(out);
    - writer.setIndent(" ");

    - out.append("POSTHOOK: LINEAGE: ");
              String queryStr = plan.getQueryStr().trim();
              writer.beginObject();
              writer.name("version").value(FORMAT_VERSION);
    @@ -182,7 +180,7 @@ public class LineageLogger implements ExecuteWithHookContext {
                // Log to console
                log(lineage);
              } else {
    - // In none test mode, emit to a log file,
    + // In non-test mode, emit to a log file,
                // which can be different from the normal hive.log.
                // For example, using NoDeleteRollingFileAppender to
                // log to some file with different rolling policy.
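
    The removal of writer.setIndent(" ") above is what flips the golden files that follow from indented, multi-line JSON to one compact line per query (the out.append removal likewise drops the "POSTHOOK: LINEAGE: " prefix from the built string). A minimal, self-contained sketch of the indentation effect; illustrative only, the class name and literal field values are hypothetical and this is not the Hive hook itself:

      import java.io.StringWriter;

      import com.google.gson.stream.JsonWriter;

      // Illustrative sketch: shows how JsonWriter.setIndent controls whether a
      // tiny lineage-like object is emitted pretty-printed or as one compact line.
      public class CompactLineageSketch {
        public static void main(String[] args) throws Exception {
          System.out.println(render(true));   // indented, multi-line (old golden files)
          System.out.println(render(false));  // compact, single line (new golden files)
        }

        static String render(boolean indented) throws Exception {
          StringWriter out = new StringWriter();
          JsonWriter writer = new JsonWriter(out);
          if (indented) {
            writer.setIndent(" ");            // the call removed by this patch
          }
          writer.beginObject();
          writer.name("version").value("1.0");
          writer.name("engine").value("mr");
          writer.name("edges").beginArray().endArray();
          writer.endObject();
          writer.close();
          return out.toString();
        }
      }

    With the indent set, the sketch prints the nested form seen in the removed q.out lines; without it, the same writer calls produce one line such as {"version":"1.0","engine":"mr","edges":[]}, matching the shape of the replacement lines below.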

    http://git-wip-us.apache.org/repos/asf/hive/blob/2b1f03e3/ql/src/test/results/clientpositive/lineage2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/lineage2.q.out b/ql/src/test/results/clientpositive/lineage2.q.out
    index 669be97..9b227c6 100644
    --- a/ql/src/test/results/clientpositive/lineage2.q.out
    +++ b/ql/src/test/results/clientpositive/lineage2.q.out
    @@ -5,118 +5,12 @@ PREHOOK: type: CREATETABLE_AS_SELECT
      PREHOOK: Input: default@src1
      PREHOOK: Output: database:default
      PREHOOK: Output: default@src2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "3a39d46286e4c2cd2139c9bb248f7b4f",
    - "queryText": "create table src2 as select key key2, value value2 from src1",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"3a39d46286e4c2cd2139c9bb248f7b4f","queryText":"create table src2 as select key key2, value value2 from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src2.value2"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
      PREHOOK: query: select * from src1 where key is not null and value is not null limit 3
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "b5b224847b2333e790a2c229434a04c8",
    - "queryText": "select * from src1 where key is not null and value is not null limit 3",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2,
    - 3
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(src1.key is not null and src1.value is not null)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"b5b224847b2333e790a2c229434a04c8","queryText":"select * from src1 where key is not null and value is not null limit 3","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"(src1.key is not null and src1.value is not null)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
      238 val_238

      311 val_311
    @@ -124,66 +18,7 @@ PREHOOK: query: select * from src1 where key > 10 and value > 'val' order by key
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "773d9d0ea92e797eae292ae1eeea11ab",
    - "queryText": "select * from src1 where key > 10 and value > 'val' order by key limit 5",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2,
    - 3
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"773d9d0ea92e797eae292ae1eeea11ab","queryText":"select * from src1 where key > 10 and value > 'val' order by key limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((UDFToDouble(src1.key) > UDFToDouble(10)) and (src1.value > 'val'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
      146 val_146
      150 val_150
      213 val_213
    @@ -196,158 +31,17 @@ PREHOOK: type: CREATETABLE_AS_SELECT
      PREHOOK: Input: default@src1
      PREHOOK: Output: database:default
      PREHOOK: Output: default@dest1
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "712fe958c357bcfc978b95c43eb19084",
    - "queryText": "create table dest1 as select * from src1",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"712fe958c357bcfc978b95c43eb19084","queryText":"create table dest1 as select * from src1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
      PREHOOK: query: insert into table dest1 select * from src2
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src2
      PREHOOK: Output: default@dest1
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "ecc718a966d8887b18084a55dd96f0bc",
    - "queryText": "insert into table dest1 select * from src2",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"ecc718a966d8887b18084a55dd96f0bc","queryText":"insert into table dest1 select * from src2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: select key k, dest1.value from dest1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "416b6f4cd63edd4f9d8213d2d7819d21",
    - "queryText": "select key k, dest1.value from dest1",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "k"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "dest1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"416b6f4cd63edd4f9d8213d2d7819d21","queryText":"select key k, dest1.value from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"dest1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
      238 val_238

      311 val_311
    @@ -403,42 +97,7 @@ PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "50fa3d1074b3fda37ce11dc6ec92ebf3",
    - "queryText": "select key from src1 union select key2 from src2 order by key",
    - "edges": [
    - {
    - "sources": [
    - 1,
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "key",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "u2.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"50fa3d1074b3fda37ce11dc6ec92ebf3","queryText":"select key from src1 union select key2 from src2 order by key","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}

      128
      146
    @@ -460,42 +119,7 @@ PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "a739460bd79c8c91ec35e22c97329769",
    - "queryText": "select key k from src1 union select key2 from src2 order by k",
    - "edges": [
    - {
    - "sources": [
    - 1,
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "key",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "u2.k"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"a739460bd79c8c91ec35e22c97329769","queryText":"select key k from src1 union select key2 from src2 order by k","edges":[{"sources":[1,2],"targets":[0],"expression":"key","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"u2.k"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src2.key2"}]}

      128
      146
    @@ -516,55 +140,7 @@ PREHOOK: query: select key, count(1) a from dest1 group by key
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "3901b5e3a164064736b3234355046340",
    - "queryText": "select key, count(1) a from dest1 group by key",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "count(1)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "a"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "TABLE",
    - "vertexId": "default.dest1"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"3901b5e3a164064736b3234355046340","queryText":"select key, count(1) a from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(1)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"a"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
       20
      128 2
      146 2
    @@ -585,55 +161,7 @@ PREHOOK: query: select key k, count(*) from dest1 group by key
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "0d5a212f10847aeaab31e8c31121e6d4",
    - "queryText": "select key k, count(*) from dest1 group by key",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "count(*)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "k"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "TABLE",
    - "vertexId": "default.dest1"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"0d5a212f10847aeaab31e8c31121e6d4","queryText":"select key k, count(*) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(*)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"TABLE","vertexId":"default.dest1"}]}
       20
      128 2
      146 2
    @@ -654,55 +182,7 @@ PREHOOK: query: select key k, count(value) from dest1 group by key
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "56429eccb04ded722f5bd9d9d8cf7260",
    - "queryText": "select key k, count(value) from dest1 group by key",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "count(default.dest1.value)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "k"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"56429eccb04ded722f5bd9d9d8cf7260","queryText":"select key k, count(value) from dest1 group by key","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"count(default.dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"k"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
       20
      128 2
      146 2
    @@ -723,55 +203,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "7e1cfc3dece85b41b6f7c46365580cde",
    - "queryText": "select value, max(length(key)) from dest1 group by value",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "max(length(dest1.key))",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "value"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"7e1cfc3dece85b41b6f7c46365580cde","queryText":"select value, max(length(key)) from dest1 group by value","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
       3
      val_146 3
      val_150 3
    @@ -795,55 +227,7 @@ PREHOOK: query: select value, max(length(key)) from dest1 group by value order b
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "c6578ce1dd72498c4af33f20f164e483",
    - "queryText": "select value, max(length(key)) from dest1 group by value order by value limit 5",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "max(length(dest1.key))",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "value"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"c6578ce1dd72498c4af33f20f164e483","queryText":"select value, max(length(key)) from dest1 group by value order by value limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"max(length(dest1.key))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"value"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.value"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.key"}]}
       3
      val_146 3
      val_150 3
    @@ -853,55 +237,7 @@ PREHOOK: query: select key, length(value) from dest1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "91fbcea5cb34362071555cd93e8d0abe",
    - "queryText": "select key, length(value) from dest1",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "length(dest1.value)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"91fbcea5cb34362071555cd93e8d0abe","queryText":"select key, length(value) from dest1","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"length(dest1.value)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"key"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
      238 7
       0
      311 7
    @@ -956,36 +292,7 @@ PREHOOK: query: select length(value) + 3 from dest1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "3d8a347cc9052111cb328938d37b9b03",
    - "queryText": "select length(value) + 3 from dest1",
    - "edges": [
    - {
    - "sources": [
    - 1
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "(length(dest1.value) + 3)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "c0"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"3d8a347cc9052111cb328938d37b9b03","queryText":"select length(value) + 3 from dest1","edges":[{"sources":[1],"targets":[0],"expression":"(length(dest1.value) + 3)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest1.value"}]}
      10
      3
      10
    @@ -1040,29 +347,7 @@ PREHOOK: query: select 5 from dest1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "bae960bf4376ec00e37258469b17360d",
    - "queryText": "select 5 from dest1",
    - "edges": [
    - {
    - "sources": [],
    - "targets": [
    - 0
    - ],
    - "expression": "5",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "c0"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"bae960bf4376ec00e37258469b17360d","queryText":"select 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"5","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
      5
      5
      5
    @@ -1117,29 +402,7 @@ PREHOOK: query: select 3 * 5 from dest1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "753abad4d55afd3df34fdc73abfcd44d",
    - "queryText": "select 3 * 5 from dest1",
    - "edges": [
    - {
    - "sources": [],
    - "targets": [
    - 0
    - ],
    - "expression": "(3 * 5)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "c0"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"753abad4d55afd3df34fdc73abfcd44d","queryText":"select 3 * 5 from dest1","edges":[{"sources":[],"targets":[0],"expression":"(3 * 5)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"}]}
      15
      15
      15
    @@ -1198,485 +461,31 @@ PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      PREHOOK: Output: database:default
      PREHOOK: Output: default@dest2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "386791c174a4999fc916e300b5e76bf2",
    - "queryText": "create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4,
    - 6
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(src1.key = src2.key2)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value2"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"386791c174a4999fc916e300b5e76bf2","queryText":"create table dest2 as select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      PREHOOK: Output: default@dest2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "e494b771d94800dc3430bf5d0810cd9f",
    - "queryText": "insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4,
    - 6
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(src1.key = src2.key2)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value2"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"e494b771d94800dc3430bf5d0810cd9f","queryText":"insert overwrite table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      PREHOOK: Output: default@dest2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "efeaddd0d36105b1013b414627850dc2",
    - "queryText": "insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4,
    - 6
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(src1.key = src2.key2)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value2"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"efeaddd0d36105b1013b414627850dc2","queryText":"insert into table dest2 select * from src1 JOIN src2 ON src1.key = src2.key2","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: insert into table dest2
        select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      PREHOOK: Output: default@dest2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "e9450a56b3d103642e06bef0e4f0d482",
    - "queryText": "insert into table dest2\n select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5,
    - 7
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(length(src1.value) = (length(src2.value2) + 1))",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value2"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"e9450a56b3d103642e06bef0e4f0d482","queryText":"insert into table dest2\n select * from src1 JOIN src2 ON length(src1.value) = length(src2.value2) + 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[5,7],"targets":[0,1,2,3],"expression":"(length(src1.value) = (length(src2.value2) + 1))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: select * from src1 where length(key) > 2
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "4028c94d222d5dd221f651d414386972",
    - "queryText": "select * from src1 where length(key) > 2",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(length(src1.key) > 2)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"4028c94d222d5dd221f651d414386972","queryText":"select * from src1 where length(key) > 2","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(length(src1.key) > 2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
      238 val_238
      311 val_311
      255 val_255
    @@ -1694,66 +503,7 @@ PREHOOK: query: select * from src1 where length(key) > 2 and value > 'a'
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "5727531f7743cfcd60d634d8c835515f",
    - "queryText": "select * from src1 where length(key) > 2 and value > 'a'",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2,
    - 3
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "((length(src1.key) > 2) and (src1.value > 'a'))",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "src1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"5727531f7743cfcd60d634d8c835515f","queryText":"select * from src1 where length(key) > 2 and value > 'a'","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[2,3],"targets":[0,1],"expression":"((length(src1.key) > 2) and (src1.value > 'a'))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"src1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"src1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
      238 val_238
      311 val_311
      255 val_255
    @@ -1773,238 +523,14 @@ PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      PREHOOK: Output: database:default
      PREHOOK: Output: default@dest3
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "a2c4e9a3ec678039814f5d84b1e38ce4",
    - "queryText": "create table dest3 as\n select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(length(src1.key) > 1)",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 4,
    - 6
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(src1.key = src2.key2)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest3.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest3.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest3.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest3.value2"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"a2c4e9a3ec678039814f5d84b1e38ce4","queryText":"create table dest3 as\n select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 1","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 1)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest3.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest3.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest3.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest3.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: insert overwrite table dest2
        select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      PREHOOK: Input: default@src2
      PREHOOK: Output: default@dest2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "76d84512204ddc576ad4d93f252e4358",
    - "queryText": "insert overwrite table dest2\n select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(length(src1.key) > 3)",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 4,
    - 6
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(src1.key = src2.key2)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.key2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest2.value2"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.key2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src2.value2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"76d84512204ddc576ad4d93f252e4358","queryText":"insert overwrite table dest2\n select * from src1 JOIN src2 ON src1.key = src2.key2 WHERE length(key) > 3","edges":[{"sources":[4],"targets":[0],"edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[4],"targets":[0,1,2,3],"expression":"(length(src1.key) > 3)","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1,2,3],"expression":"(src1.key = src2.key2)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest2.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest2.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest2.key2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest2.value2"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":5,"vertexType":"COLUMN","vertexId":"default.src1.value"},{"id":6,"vertexType":"COLUMN","vertexId":"default.src2.key2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.src2.value2"}]}
      PREHOOK: query: drop table if exists dest_l1
      PREHOOK: type: DROPTABLE
      PREHOOK: query: CREATE TABLE dest_l1(key INT, value STRING) STORED AS TEXTFILE
    @@ -2026,73 +552,7 @@ PREHOOK: type: QUERY
      PREHOOK: Input: default@src
      PREHOOK: Input: default@src1
      PREHOOK: Output: default@dest_l1
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "60b589744e2527dd235a6c8168d6a653",
    - "queryText": "INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n FROM src1 t1\n LEFT OUTER JOIN src p1\n ON (t1.key = p1.key)\n UNION ALL\n SELECT t2.key, p2.value\n FROM src1 t2\n LEFT OUTER JOIN src p2\n ON (t2.key = p2.key)) j",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "UDFToInteger(j.key)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "j.value",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4,
    - 2
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(p1.key = t1.key)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l1.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l1.value"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src.value"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src.key"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"60b589744e2527dd235a6c8168d6a653","queryText":"INSERT OVERWRITE TABLE dest_l1\nSELECT j.*\nFROM (SELECT t1.key, p1.value\n FROM src1 t1\n LEFT OUTER JOIN src p1\n ON (t1.key = p1.key)\n UNION ALL\n SELECT t2.key, p2.value\n FROM src1 t2\n LEFT OUTER JOIN src p2\n ON (t2.key = p2.key)) j","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(j.key)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"j.value","edgeType":"PROJECTION"},{"sources":[4,2],"targets":[0,1],"expression":"(p1.key = t1.key)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.value"},{"id":2,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src.value"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src.key"}]}
      PREHOOK: query: drop table if exists emp
      PREHOOK: type: DROPTABLE
      PREHOOK: query: drop table if exists dept
    @@ -2133,185 +593,7 @@ PREHOOK: Input: default@dept
      PREHOOK: Input: default@emp
      PREHOOK: Input: default@project
      PREHOOK: Output: default@tgt
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "f59797e0422d2e51515063374dfac361",
    - "queryText": "INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n FROM (\n SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n ) em\n JOIN dept d ON d.dept_id = em.dept_id\n ) emd JOIN project p ON emd.dept_id = p.project_id",
    - "edges": [
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "emd.name",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 8
    - ],
    - "targets": [
    - 2
    - ],
    - "expression": "emd.emp_id",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 8
    - ],
    - "targets": [
    - 3
    - ],
    - "expression": "emd.mgr_id",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 9
    - ],
    - "targets": [
    - 4
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 10
    - ],
    - "targets": [
    - 5
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 8
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3,
    - 4,
    - 5
    - ],
    - "expression": "(e.emp_id = m.emp_id)",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 11,
    - 12
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3,
    - 4,
    - 5
    - ],
    - "expression": "(em._col1 = d.dept_id)",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 11,
    - 9
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3,
    - 4,
    - 5
    - ],
    - "expression": "(emd._col4 = p.project_id)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.tgt.dept_name"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.tgt.name"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.tgt.emp_id"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.tgt.mgr_id"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.tgt.proj_id"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.tgt.proj_name"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dept.dept_name"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.emp.name"
    - },
    - {
    - "id": 8,
    - "vertexType": "COLUMN",
    - "vertexId": "default.emp.emp_id"
    - },
    - {
    - "id": 9,
    - "vertexType": "COLUMN",
    - "vertexId": "default.project.project_id"
    - },
    - {
    - "id": 10,
    - "vertexType": "COLUMN",
    - "vertexId": "default.project.project_name"
    - },
    - {
    - "id": 11,
    - "vertexType": "COLUMN",
    - "vertexId": "default.emp.dept_id"
    - },
    - {
    - "id": 12,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dept.dept_id"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"f59797e0422d2e51515063374dfac361","queryText":"INSERT INTO TABLE tgt\nSELECT emd.dept_name, emd.name, emd.emp_id, emd.mgr_id, p.project_id, p.project_name\nFROM (\n SELECT d.dept_name, em.name, em.emp_id, em.mgr_id, em.dept_id\n FROM (\n SELECT e.name, e.dept_id, e.emp_id emp_id, m.emp_id mgr_id\n FROM emp e JOIN emp m ON e.emp_id = m.emp_id\n ) em\n JOIN dept d ON d.dept_id = em.dept_id\n ) emd JOIN project p ON emd.dept_id = p.project_id","edges":[{"sources":[6],"targets":[0],"edgeType":"PROJECTION"},{"sources":[7],"targets":[1],"expression":"emd.name","edgeType":"PROJECTION"},{"sources":[8],"targets":[2],"expression":"emd.emp_id","edgeType":"PROJECTION"},{"sources":[8],"targets":[3],"expression":"emd.mgr_id","edgeType":"PROJECTION"},{"sources":[9],"targets":[4],"edgeType":"PROJECTION"},{"sources":[10],"targets":[5],"edgeType":"PROJECTION"},{"sources":[8],"targets":[0,1,2,3,4,5],"expression":"(e.emp_id = m.emp_id)","edgeType":"PREDICATE"},{"sources":[11,12],"targets":[0,1,2,3,4,5],"expression":"(em._col1 = d.dept_id)","edgeType":"PREDICATE"},{"sources":[11,9],"targets":[0,1,2,3,4,5],"expression":"(emd._col4 = p.project_id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.tgt.dept_name"},{"id":1,"vertexType":"COLUMN","vertexId":"default.tgt.name"},{"id":2,"vertexType":"COLUMN","vertexId":"default.tgt.emp_id"},{"id":3,"vertexType":"COLUMN","vertexId":"default.tgt.mgr_id"},{"id":4,"vertexType":"COLUMN","vertexId":"default.tgt.proj_id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.tgt.proj_name"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dept.dept_name"},{"id":7,"vertexType":"COLUMN","vertexId":"default.emp.name"},{"id":8,"vertexType":"COLUMN","vertexId":"default.emp.emp_id"},{"id":9,"vertexType":"COLUMN","vertexId":"default.project.project_id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.project.project_name"},{"id":11,"vertexType":"COLUMN","vertexId":"default.emp.dept_id"},{"id":12,"vertexType":"COLUMN","vertexId":"default.dept.dept_id"}]}
      PREHOOK: query: drop table if exists dest_l2
      PREHOOK: type: DROPTABLE
      PREHOOK: query: create table dest_l2 (id int, c1 tinyint, c2 int, c3 bigint) stored as textfile
    @@ -2322,68 +604,7 @@ PREHOOK: query: insert into dest_l2 values(0, 1, 100, 10000)
      PREHOOK: type: QUERY
      PREHOOK: Input: default@values__tmp__table__1
      PREHOOK: Output: default@dest_l2
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "e001334e3f8384806b0f25a7c303045f",
    - "queryText": "insert into dest_l2 values(0, 1, 100, 10000)",
    - "edges": [
    - {
    - "sources": [],
    - "targets": [
    - 0
    - ],
    - "expression": "UDFToInteger(values__tmp__table__1.tmp_values_col1)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [],
    - "targets": [
    - 1
    - ],
    - "expression": "UDFToByte(values__tmp__table__1.tmp_values_col2)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [],
    - "targets": [
    - 2
    - ],
    - "expression": "UDFToInteger(values__tmp__table__1.tmp_values_col3)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [],
    - "targets": [
    - 3
    - ],
    - "expression": "UDFToLong(values__tmp__table__1.tmp_values_col4)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.id"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c3"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"e001334e3f8384806b0f25a7c303045f","queryText":"insert into dest_l2 values(0, 1, 100, 10000)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1],"expression":"UDFToByte(values__tmp__table__1.tmp_values_col2)","edgeType":"PROJECTION"},{"sources":[],"targets":[2],"expression":"UDFToInteger(values__tmp__table__1.tmp_values_col3)","edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToLong(values__tmp__table__1.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
      PREHOOK: query: select * from (
        select c1 + c2 x from dest_l2
        union all
    @@ -2391,48 +612,7 @@ PREHOOK: query: select * from (
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest_l2
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "a2c96a96be9d315ede966be5b45ef20e",
    - "queryText": "select * from (\n select c1 + c2 x from dest_l2\n union all\n select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x",
    - "edges": [
    - {
    - "sources": [
    - 1,
    - 2,
    - 3
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "v2.x",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "v2.x"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c3"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"a2c96a96be9d315ede966be5b45ef20e","queryText":"select * from (\n select c1 + c2 x from dest_l2\n union all\n select sum(c3) y from (select c3 from dest_l2) v1) v2 order by x","edges":[{"sources":[1,2,3],"targets":[0],"expression":"v2.x","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"v2.x"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"}]}
      101
      10000
      PREHOOK: query: drop table if exists dest_l3
    @@ -2445,60 +625,7 @@ PREHOOK: query: insert into dest_l3 values(0, "s1", "s2", 15)
      PREHOOK: type: QUERY
      PREHOOK: Input: default@values__tmp__table__2
      PREHOOK: Output: default@dest_l3
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "09df51ba6ba2d07f2304523ee505f094",
    - "queryText": "insert into dest_l3 values(0, \"s1\", \"s2\", 15)",
    - "edges": [
    - {
    - "sources": [],
    - "targets": [
    - 0
    - ],
    - "expression": "UDFToInteger(values__tmp__table__2.tmp_values_col1)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [],
    - "targets": [
    - 1,
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [],
    - "targets": [
    - 3
    - ],
    - "expression": "UDFToInteger(values__tmp__table__2.tmp_values_col4)",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.id"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c3"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"09df51ba6ba2d07f2304523ee505f094","queryText":"insert into dest_l3 values(0, \"s1\", \"s2\", 15)","edges":[{"sources":[],"targets":[0],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col1)","edgeType":"PROJECTION"},{"sources":[],"targets":[1,2],"edgeType":"PROJECTION"},{"sources":[],"targets":[3],"expression":"UDFToInteger(values__tmp__table__2.tmp_values_col4)","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"}]}
      PREHOOK: query: select sum(a.c1) over (partition by a.c1 order by a.id)
      from dest_l2 a
      where a.c2 != 10
    @@ -2507,68 +634,7 @@ having count(a.c2) > 0
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dest_l2
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "0ae7aa4a0cbd1283210fa79e8a19104a",
    - "queryText": "select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0",
    - "edges": [
    - {
    - "sources": [
    - 1,
    - 2,
    - 3
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "(tok_function sum (. (tok_table_or_col $hdt$_0) $f0) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) $f0)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) $f2)))) (tok_windowvalues (preceding 2147483647) current)))",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "(a.c2 <> 10)",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "(count(default.dest_l2.c2) > 0)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "c0"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.id"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"0ae7aa4a0cbd1283210fa79e8a19104a","queryText":"select sum(a.c1) over (partition by a.c1 order by a.id)\nfrom dest_l2 a\nwhere a.c2 != 10\ngroup by a.c1, a.c2, a.id\nhaving count(a.c2) > 0","edges":[{"sources":[1,2,3],"targets":[0],"expression":"(tok_function sum (. (tok_table_or_col $hdt$_0) $f0) (tok_windowspec (tok_partitioningspec (tok_distributeby (. (tok_table_or_col $hdt$_0) $f0)) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col $hdt$_0) $f2)))) (tok_windowvalues (preceding 2147483647) current)))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0],"expression":"(a.c2 <> 10)","edgeType":"PREDICATE"},{"sources":[2],"targets":[0],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default
      .dest_l2.id"}]}
      1
      PREHOOK: query: select sum(a.c1), count(b.c1), b.c2, b.c3
      from dest_l2 a join dest_l3 b on (a.id = b.id)
    @@ -2580,150 +646,7 @@ PREHOOK: type: QUERY
      PREHOOK: Input: default@dest_l2
      PREHOOK: Input: default@dest_l3
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "01879c619517509d9f5b6ead998bb4bb",
    - "queryText": "select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5",
    - "edges": [
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "sum(default.dest_l2.c1)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "count(default.dest_l3.c1)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 6
    - ],
    - "targets": [
    - 2
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 7
    - ],
    - "targets": [
    - 3
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 8,
    - 7
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "((a.c2 <> 10) and (b.c3 > 0))",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 8
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(count(default.dest_l2.c2) > 0)",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 9,
    - 10
    - ],
    - "targets": [
    - 0,
    - 1,
    - 2,
    - 3
    - ],
    - "expression": "(a.id = b.id)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "_c0"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "_c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "b.c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "b.c3"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c1"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c1"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c2"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c3"
    - },
    - {
    - "id": 8,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c2"
    - },
    - {
    - "id": 9,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.id"
    - },
    - {
    - "id": 10,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.id"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"01879c619517509d9f5b6ead998bb4bb","queryText":"select sum(a.c1), count(b.c1), b.c2, b.c3\nfrom dest_l2 a join dest_l3 b on (a.id = b.id)\nwhere a.c2 != 10 and b.c3 > 0\ngroup by a.c1, a.c2, a.id, b.c1, b.c2, b.c3\nhaving count(a.c2) > 0\norder by b.c3 limit 5","edges":[{"sources":[4],"targets":[0],"expression":"sum(default.dest_l2.c1)","edgeType":"PROJECTION"},{"sources":[5],"targets":[1],"expression":"count(default.dest_l3.c1)","edgeType":"PROJECTION"},{"sources":[6],"targets":[2],"edgeType":"PROJECTION"},{"sources":[7],"targets":[3],"edgeType":"PROJECTION"},{"sources":[8,7],"targets":[0,1,2,3],"expression":"((a.c2 <> 10) and (b.c3 > 0))","edgeType":"PREDICATE"},{"sources":[8],"targets":[0,1,2,3],"expression":"(count(default.dest_l2.c2) > 0)","edgeType":"PREDICATE"},{"sources":[9,10],"targets":[0,1,2,3],"expression":"(a.id = b.id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"_c0"},{"id":1,"vertexType":"COLUMN
      ","vertexId":"_c1"},{"id":2,"vertexType":"COLUMN","vertexId":"b.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"b.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.c1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c1"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.c2"},{"id":7,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":8,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":9,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":10,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"}]}
      1 1 s2 15
      PREHOOK: query: drop table if exists t
      PREHOOK: type: DROPTABLE
    @@ -2736,93 +659,7 @@ PREHOOK: Input: default@dest_l2
      PREHOOK: Input: default@dest_l3
      PREHOOK: Output: database:default
      PREHOOK: Output: default@t
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "0d2f15b494111ffe236d5be42a76fa28",
    - "queryText": "create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4,
    - 5
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "((a.id > 0) and (b.c3 = 15))",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 4,
    - 6
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(a.id = b.id)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.t.c2"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.t.c3"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.c3"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l2.id"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.c3"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.dest_l3.id"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"0d2f15b494111ffe236d5be42a76fa28","queryText":"create table t as\nselect distinct a.c2, a.c3 from dest_l2 a\ninner join dest_l3 b on (a.id = b.id)\nwhere a.id > 0 and b.c3 = 15","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"edgeType":"PROJECTION"},{"sources":[4,5],"targets":[0,1],"expression":"((a.id > 0) and (b.c3 = 15))","edgeType":"PREDICATE"},{"sources":[4,6],"targets":[0,1],"expression":"(a.id = b.id)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.c2"},{"id":1,"vertexType":"COLUMN","vertexId":"default.t.c3"},{"id":2,"vertexType":"COLUMN","vertexId":"default.dest_l2.c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.dest_l2.c3"},{"id":4,"vertexType":"COLUMN","vertexId":"default.dest_l2.id"},{"id":5,"vertexType":"COLUMN","vertexId":"default.dest_l3.c3"},{"id":6,"vertexType":"COLUMN","vertexId":"default.dest_l3.id"}]}
      PREHOOK: query: SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),
      concat(substr(src1.key,1,1),sum(substr(src1.value,5)))
      from src1
    @@ -2830,72 +667,7 @@ GROUP BY substr(src1.key,1,1)
      PREHOOK: type: QUERY
      PREHOOK: Input: default@src1
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "5b1022708124ee2b80f9e2e8a0dcb15c",
    - "queryText": "SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)",
    - "edges": [
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "substr(src1.key, 1, 1)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 4
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "count(DISTINCT substr(src1.value, 5))",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3,
    - 4
    - ],
    - "targets": [
    - 2
    - ],
    - "expression": "concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))",
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "c0"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "c2"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.value"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"5b1022708124ee2b80f9e2e8a0dcb15c","queryText":"SELECT substr(src1.key,1,1), count(DISTINCT substr(src1.value,5)),\nconcat(substr(src1.key,1,1),sum(substr(src1.value,5)))\nfrom src1\nGROUP BY substr(src1.key,1,1)","edges":[{"sources":[3],"targets":[0],"expression":"substr(src1.key, 1, 1)","edgeType":"PROJECTION"},{"sources":[4],"targets":[1],"expression":"count(DISTINCT substr(src1.value, 5))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[2],"expression":"concat(substr(src1.key, 1, 1), sum(substr(src1.value, 5)))","edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"c2"},{"id":3,"vertexType":"COLUMN","vertexId":"default.src1.key"},{"id":4,"vertexType":"COLUMN","vertexId":"default.src1.value"}]}
       7 1543.0
      1 3 1296.0
      2 6 21257.0
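    The hunks above switch the POSTHOOK lineage output from a pretty-printed block to one compact JSON object per query; the fields themselves (version, engine, hash, queryText, edges, vertices) are unchanged. As a minimal sketch of reading one such line downstream (assuming Jackson is on the classpath; the class name and the shortened sample line are illustrative, not part of the patch):

      import com.fasterxml.jackson.databind.JsonNode;
      import com.fasterxml.jackson.databind.ObjectMapper;

      import java.util.HashMap;
      import java.util.Map;

      // Illustrative reader for one compact lineage line of the form shown in the .q.out diffs above.
      public class LineageLineReader {
        public static void main(String[] args) throws Exception {
          String line = "{\"version\":\"1.0\",\"engine\":\"mr\",\"hash\":\"abc\","
              + "\"queryText\":\"create table t as select key from src1\","
              + "\"edges\":[{\"sources\":[1],\"targets\":[0],\"edgeType\":\"PROJECTION\"}],"
              + "\"vertices\":[{\"id\":0,\"vertexType\":\"COLUMN\",\"vertexId\":\"default.t.key\"},"
              + "{\"id\":1,\"vertexType\":\"COLUMN\",\"vertexId\":\"default.src1.key\"}]}";
          JsonNode root = new ObjectMapper().readTree(line);
          // Vertices are referenced from edges by integer id, so index them first.
          Map<Integer, String> vertexById = new HashMap<>();
          for (JsonNode v : root.get("vertices")) {
            vertexById.put(v.get("id").asInt(), v.get("vertexId").asText());
          }
          // Print each edge as "EDGETYPE: source columns -> target columns".
          for (JsonNode e : root.get("edges")) {
            StringBuilder sb = new StringBuilder(e.get("edgeType").asText()).append(": ");
            for (JsonNode s : e.get("sources")) sb.append(vertexById.get(s.asInt())).append(' ');
            sb.append("-> ");
            for (JsonNode t : e.get("targets")) sb.append(vertexById.get(t.asInt())).append(' ');
            System.out.println(sb.toString().trim());
          }
        }
      }

    Pointed at any of the compacted lines above, the same loop prints one PROJECTION or PREDICATE edge per line with column names substituted for the numeric vertex ids.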
  • Sunchao at Jul 20, 2015 at 8:12 pm
    HIVE-11285 : ObjectInspector for partition columns in FetchOperator in SMBJoin causes exception (Pengcheng Xiong via Ashutosh Chauhan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/8646c12f
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/8646c12f
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/8646c12f

    Branch: refs/heads/spark
    Commit: 8646c12f7438a166d32cf8527733fe68d152a831
    Parents: b0247cf
    Author: Pengcheng Xiong <pxiong@hortonworks.com>
    Authored: Thu Jul 16 02:09:00 2015 +0700
    Committer: Ashutosh Chauhan <hashutosh@apache.org>
    Committed: Fri Jul 17 13:06:10 2015 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/ql/exec/FetchOperator.java | 5 +-
      .../clientpositive/smb_join_partition_key.q | 35 +++++
      .../clientpositive/smb_join_partition_key.q.out | 128 +++++++++++++++++++
      3 files changed, 166 insertions(+), 2 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/8646c12f/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    index 258d28e..4c6f7ee 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java
    @@ -229,8 +229,9 @@ public class FetchOperator implements Serializable {
          String[] partKeyTypes = pcolTypes.trim().split(":");
          ObjectInspector[] inspectors = new ObjectInspector[partKeys.length];
          for (int i = 0; i < partKeys.length; i++) {
    - inspectors[i] = TypeInfoUtils.getStandardJavaObjectInspectorFromTypeInfo(
    - TypeInfoFactory.getPrimitiveTypeInfo(partKeyTypes[i]));
    + inspectors[i] = PrimitiveObjectInspectorFactory
    + .getPrimitiveWritableObjectInspector(TypeInfoFactory
    + .getPrimitiveTypeInfo(partKeyTypes[i]));
          }
          return ObjectInspectorFactory.getStandardStructObjectInspector(
              Arrays.asList(partKeys), Arrays.asList(inspectors));
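    The hunk above is the whole code change: partition-column object inspectors are now writable primitive inspectors instead of standard Java-object ones. Pulled out of FetchOperator into a self-contained sketch (class and method names here are illustrative; the three factory calls are the ones in the patched code), the construction looks like this:

      import java.util.Arrays;

      import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
      import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
      import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
      import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

      public class PartitionKeyInspectors {
        // Builds a struct inspector over partition columns,
        // e.g. partKeys = {"p1"}, partKeyTypes = {"decimal(10,0)"}.
        static StructObjectInspector build(String[] partKeys, String[] partKeyTypes) {
          ObjectInspector[] inspectors = new ObjectInspector[partKeys.length];
          for (int i = 0; i < partKeys.length; i++) {
            // HIVE-11285: writable primitive inspectors (IntWritable, HiveDecimalWritable, ...)
            // rather than the standard Java-object inspectors used before the patch.
            inspectors[i] = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(
                TypeInfoFactory.getPrimitiveTypeInfo(partKeyTypes[i]));
          }
          return ObjectInspectorFactory.getStandardStructObjectInspector(
              Arrays.asList(partKeys), Arrays.asList(inspectors));
        }
      }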

    http://git-wip-us.apache.org/repos/asf/hive/blob/8646c12f/ql/src/test/queries/clientpositive/smb_join_partition_key.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/smb_join_partition_key.q b/ql/src/test/queries/clientpositive/smb_join_partition_key.q
    new file mode 100644
    index 0000000..49e2d2f
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/smb_join_partition_key.q
    @@ -0,0 +1,35 @@
    +SET hive.execution.engine=mr;
    +SET hive.enforce.sortmergebucketmapjoin=false;
    +SET hive.auto.convert.sortmerge.join=true;
    +SET hive.optimize.bucketmapjoin = true;
    +SET hive.optimize.bucketmapjoin.sortedmerge = true;
    +SET hive.input.format = org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
    +SET hive.exec.dynamic.partition.mode=nonstrict;
    +
    +CREATE TABLE data_table (key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|';
    +
    +insert into table data_table values(1, 'one');
    +insert into table data_table values(2, 'two');
    +
    +CREATE TABLE smb_table (key INT, value STRING) CLUSTERED BY (key)
    +SORTED BY (key) INTO 1 BUCKETS STORED AS ORC;
    +
    +CREATE TABLE smb_table_part (key INT, value STRING) PARTITIONED BY (p1 DECIMAL)
    +CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
    +STORED AS ORC;
    +
    +INSERT OVERWRITE TABLE smb_table SELECT * FROM data_table;
    +
    +INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table;
    +
    +SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key;
    +
    +drop table smb_table_part;
    +
    +CREATE TABLE smb_table_part (key INT, value STRING) PARTITIONED BY (p1 double)
    +CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
    +STORED AS ORC;
    +
    +INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table;
    +
    +SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/8646c12f/ql/src/test/results/clientpositive/smb_join_partition_key.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/smb_join_partition_key.q.out b/ql/src/test/results/clientpositive/smb_join_partition_key.q.out
    new file mode 100644
    index 0000000..a4ab8c3
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/smb_join_partition_key.q.out
    @@ -0,0 +1,128 @@
    +PREHOOK: query: CREATE TABLE data_table (key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@data_table
    +POSTHOOK: query: CREATE TABLE data_table (key INT, value STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@data_table
    +PREHOOK: query: insert into table data_table values(1, 'one')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@values__tmp__table__1
    +PREHOOK: Output: default@data_table
    +POSTHOOK: query: insert into table data_table values(1, 'one')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@values__tmp__table__1
    +POSTHOOK: Output: default@data_table
    +POSTHOOK: Lineage: data_table.key EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: data_table.value SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: insert into table data_table values(2, 'two')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@values__tmp__table__2
    +PREHOOK: Output: default@data_table
    +POSTHOOK: query: insert into table data_table values(2, 'two')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@values__tmp__table__2
    +POSTHOOK: Output: default@data_table
    +POSTHOOK: Lineage: data_table.key EXPRESSION [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: data_table.value SIMPLE [(values__tmp__table__2)values__tmp__table__2.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: CREATE TABLE smb_table (key INT, value STRING) CLUSTERED BY (key)
    +SORTED BY (key) INTO 1 BUCKETS STORED AS ORC
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@smb_table
    +POSTHOOK: query: CREATE TABLE smb_table (key INT, value STRING) CLUSTERED BY (key)
    +SORTED BY (key) INTO 1 BUCKETS STORED AS ORC
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@smb_table
    +PREHOOK: query: CREATE TABLE smb_table_part (key INT, value STRING) PARTITIONED BY (p1 DECIMAL)
    +CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
    +STORED AS ORC
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@smb_table_part
    +POSTHOOK: query: CREATE TABLE smb_table_part (key INT, value STRING) PARTITIONED BY (p1 DECIMAL)
    +CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
    +STORED AS ORC
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@smb_table_part
    +PREHOOK: query: INSERT OVERWRITE TABLE smb_table SELECT * FROM data_table
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@data_table
    +PREHOOK: Output: default@smb_table
    +POSTHOOK: query: INSERT OVERWRITE TABLE smb_table SELECT * FROM data_table
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@data_table
    +POSTHOOK: Output: default@smb_table
    +POSTHOOK: Lineage: smb_table.key SIMPLE [(data_table)data_table.FieldSchema(name:key, type:int, comment:null), ]
    +POSTHOOK: Lineage: smb_table.value SIMPLE [(data_table)data_table.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@data_table
    +PREHOOK: Output: default@smb_table_part
    +POSTHOOK: query: INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@data_table
    +POSTHOOK: Output: default@smb_table_part@p1=100
    +POSTHOOK: Lineage: smb_table_part PARTITION(p1=100).key SIMPLE [(data_table)data_table.FieldSchema(name:key, type:int, comment:null), ]
    +POSTHOOK: Lineage: smb_table_part PARTITION(p1=100).value SIMPLE [(data_table)data_table.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@smb_table
    +PREHOOK: Input: default@smb_table_part
    +PREHOOK: Input: default@smb_table_part@p1=100
    +#### A masked pattern was here ####
    +POSTHOOK: query: SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@smb_table
    +POSTHOOK: Input: default@smb_table_part
    +POSTHOOK: Input: default@smb_table_part@p1=100
    +#### A masked pattern was here ####
    +1 100
    +2 100
    +PREHOOK: query: drop table smb_table_part
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@smb_table_part
    +PREHOOK: Output: default@smb_table_part
    +POSTHOOK: query: drop table smb_table_part
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@smb_table_part
    +POSTHOOK: Output: default@smb_table_part
    +PREHOOK: query: CREATE TABLE smb_table_part (key INT, value STRING) PARTITIONED BY (p1 double)
    +CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
    +STORED AS ORC
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@smb_table_part
    +POSTHOOK: query: CREATE TABLE smb_table_part (key INT, value STRING) PARTITIONED BY (p1 double)
    +CLUSTERED BY (key) SORTED BY (key) INTO 1 BUCKETS
    +STORED AS ORC
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@smb_table_part
    +PREHOOK: query: INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@data_table
    +PREHOOK: Output: default@smb_table_part
    +POSTHOOK: query: INSERT OVERWRITE TABLE smb_table_part PARTITION (p1) SELECT key, value, 100 as p1 FROM data_table
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@data_table
    +POSTHOOK: Output: default@smb_table_part@p1=100
    +POSTHOOK: Lineage: smb_table_part PARTITION(p1=100).key SIMPLE [(data_table)data_table.FieldSchema(name:key, type:int, comment:null), ]
    +POSTHOOK: Lineage: smb_table_part PARTITION(p1=100).value SIMPLE [(data_table)data_table.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@smb_table
    +PREHOOK: Input: default@smb_table_part
    +PREHOOK: Input: default@smb_table_part@p1=100
    +#### A masked pattern was here ####
    +POSTHOOK: query: SELECT s1.key, s2.p1 FROM smb_table s1 INNER JOIN smb_table_part s2 ON s1.key = s2.key ORDER BY s1.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@smb_table
    +POSTHOOK: Input: default@smb_table_part
    +POSTHOOK: Input: default@smb_table_part@p1=100
    +#### A masked pattern was here ####
    +1 100.0
    +2 100.0
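    For context on why the inspector flavor matters: a writable primitive inspector reads Hadoop writables (IntWritable and friends), while the standard Java-object inspector reads boxed Java values, so feeding one kind of value to the other kind of inspector fails. A minimal, illustrative sketch of that mismatch (it does not reproduce the exact exception from HIVE-11285):

      import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector;
      import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
      import org.apache.hadoop.io.IntWritable;

      public class InspectorMismatch {
        public static void main(String[] args) {
          IntObjectInspector writableOI = PrimitiveObjectInspectorFactory.writableIntObjectInspector;
          IntObjectInspector javaOI = PrimitiveObjectInspectorFactory.javaIntObjectInspector;

          Object value = new IntWritable(100);

          // The writable inspector reads an IntWritable directly.
          System.out.println(writableOI.get(value)); // prints 100

          // The Java-object inspector casts to Integer internally, so the same value blows up.
          try {
            javaOI.get(value);
          } catch (ClassCastException e) {
            System.out.println("Java OI cannot read IntWritable: " + e);
          }
        }
      }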
  • Sunchao at Jul 20, 2015 at 8:12 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/2b1f03e3/ql/src/test/results/clientpositive/lineage3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
    index 1afe92d..07b5f31 100644
    --- a/ql/src/test/results/clientpositive/lineage3.q.out
    +++ b/ql/src/test/results/clientpositive/lineage3.q.out
    @@ -8,35 +8,7 @@ PREHOOK: type: CREATETABLE_AS_SELECT
      PREHOOK: Input: default@src1
      PREHOOK: Output: database:default
      PREHOOK: Output: default@t
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "761b3a1f405d8e719d3f0c9147b57a23",
    - "queryText": "create table t as\nselect * from\n (select * from\n (select key from src1 limit 1) v1) v2",
    - "edges": [
    - {
    - "sources": [
    - 1
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "default.t.key"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "default.src1.key"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"761b3a1f405d8e719d3f0c9147b57a23","queryText":"create table t as\nselect * from\n (select * from\n (select key from src1 limit 1) v1) v2","edges":[{"sources":[1],"targets":[0],"edgeType":"PROJECTION"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.t.key"},{"id":1,"vertexType":"COLUMN","vertexId":"default.src1.key"}]}
      PREHOOK: query: drop table if exists dest_l1
      PREHOOK: type: DROPTABLE
      PREHOOK: query: create table dest_l1(a int, b varchar(128))
    @@ -51,66 +23,7 @@ where cint is not null and cint < 0 order by cint, cs limit 5
      PREHOOK: type: QUERY
      PREHOOK: Input: default@alltypesorc
      PREHOOK: Output: default@dest_l1@ds=today
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "2b5891d094ff74e23ec6acf5b4990f45",
    - "queryText": "insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "CAST( alltypesorc.cstring1 AS varchar(128))",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(alltypesorc.cint is not null and (alltypesorc.cint < 0))",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "cint"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "cs"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cint"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cstring1"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"cs"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
      PREHOOK: query: insert into table dest_l1 partition (ds='tomorrow')
      select min(cint), cast(min(cstring1) as varchar(128)) as cs
      from alltypesorc
    @@ -120,215 +33,13 @@ having min(cbigint) > 10
      PREHOOK: type: QUERY
      PREHOOK: Input: default@alltypesorc
      PREHOOK: Output: default@dest_l1@ds=tomorrow
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "4ad6338a8abfe3fe0342198fcbd1f11d",
    - "queryText": "insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "expression": "min(default.alltypesorc.cint)",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "CAST( min(default.alltypesorc.cstring1) AS varchar(128))",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2,
    - 4
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))",
    - "edgeType": "PREDICATE"
    - },
    - {
    - "sources": [
    - 5
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "(min(default.alltypesorc.cbigint) > 10)",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "c0"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "cs"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cint"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cstring1"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cboolean1"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cbigint"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"cs"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN",
      "vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
      PREHOOK: query: select cint, rank() over(order by cint) from alltypesorc
      where cint > 10 and cint < 10000 limit 10
      PREHOOK: type: QUERY
      PREHOOK: Input: default@alltypesorc
      #### A masked pattern was here ####
    -POSTHOOK: LINEAGE: {
    - "version": "1.0",
    - "engine": "mr",
    - "hash": "351b08ec58591554ec10a6ded68ef25f",
    - "queryText": "select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10",
    - "edges": [
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0
    - ],
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 3,
    - 4,
    - 2,
    - 5,
    - 6,
    - 7,
    - 8,
    - 9,
    - 10,
    - 11,
    - 12,
    - 13
    - ],
    - "targets": [
    - 1
    - ],
    - "expression": "(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))",
    - "edgeType": "PROJECTION"
    - },
    - {
    - "sources": [
    - 2
    - ],
    - "targets": [
    - 0,
    - 1
    - ],
    - "expression": "((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))",
    - "edgeType": "PREDICATE"
    - }
    - ],
    - "vertices": [
    - {
    - "id": 0,
    - "vertexType": "COLUMN",
    - "vertexId": "cint"
    - },
    - {
    - "id": 1,
    - "vertexType": "COLUMN",
    - "vertexId": "c1"
    - },
    - {
    - "id": 2,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cint"
    - },
    - {
    - "id": 3,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.ctinyint"
    - },
    - {
    - "id": 4,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.csmallint"
    - },
    - {
    - "id": 5,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cbigint"
    - },
    - {
    - "id": 6,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cfloat"
    - },
    - {
    - "id": 7,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cdouble"
    - },
    - {
    - "id": 8,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cstring1"
    - },
    - {
    - "id": 9,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cstring2"
    - },
    - {
    - "id": 10,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.ctimestamp1"
    - },
    - {
    - "id": 11,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.ctimestamp2"
    - },
    - {
    - "id": 12,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cboolean1"
    - },
    - {
    - "id": 13,
    - "vertexType": "COLUMN",
    - "vertexId": "default.alltypesorc.cboolean2"
    - }
    - ]
    -}
    +{"version":"1.0","engine":"mr","hash":"351b08ec58591554ec10a6ded68ef25f","queryText":"select cint, rank() over(order by cint) from alltypesorc\nwhere cint > 10 and cint < 10000 limit 10","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3,4,2,5,6,7,8,9,10,11,12,13],"targets":[1],"expression":"(tok_function rank (tok_windowspec (tok_partitioningspec (tok_distributeby 0) (tok_orderby (tok_tabsortcolnameasc (. (tok_table_or_col alltypesorc) cint)))) (tok_windowrange (preceding 2147483647) (following 2147483647))))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"((alltypesorc.cint > 10) and (alltypesorc.cint < 10000))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"c1"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.a
      lltypesorc.csmallint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"},{"id":6,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cfloat"},{"id":7,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cdouble"},{"id":8,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":9,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring2"},{"id":10,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp1"},{"id":11,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctimestamp2"},{"id":12,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":13,