Modified: hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf.q.out (original)
+++ hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf.q.out Wed Apr 1 01:15:50 2015
@@ -790,7 +790,7 @@ STAGE PLANS:
              alias: decimal_udf
              Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
- predicate: ((key * CAST( value AS decimal(10,0))) > CAST( 0 AS decimal(31,10))) (type: boolean)
+ predicate: ((key * CAST( value AS decimal(10,0))) > 0) (type: boolean)
                Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: key (type: decimal(20,10)), value (type: int)
@@ -1100,7 +1100,7 @@ STAGE PLANS:
              alias: decimal_udf
              Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
              Select Operator
- expressions: (key / CAST( 0 AS decimal(10,0))) (type: decimal(22,12))
+ expressions: (key / 0) (type: decimal(22,12))
                outputColumnNames: _col0
                Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                Limit
@@ -1192,7 +1192,7 @@ STAGE PLANS:
              alias: decimal_udf
              Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
- predicate: (key <> CAST( 0 AS decimal(20,10))) (type: boolean)
+ predicate: (key <> 0) (type: boolean)
                Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: (key / key) (type: decimal(38,24))
@@ -2157,7 +2157,7 @@ STAGE PLANS:
              alias: decimal_udf
              Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
              Select Operator
- expressions: ((key + CAST( 1 AS decimal(10,0))) % (key / CAST( 2 AS decimal(10,0)))) (type: decimal(22,12))
+ expressions: ((key + 1) % (key / 2)) (type: decimal(22,12))
                outputColumnNames: _col0
                Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                File Output Operator

Modified: hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out (original)
+++ hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out Wed Apr 1 01:15:50 2015
@@ -68,10 +68,10 @@ STAGE PLANS:
              alias: decimal_udf2
              Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
- predicate: (key = CAST( 10 AS decimal(20,10))) (type: boolean)
+ predicate: (key = 10) (type: boolean)
                Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                Select Operator
- expressions: acos(key) (type: double), asin(key) (type: double), atan(key) (type: double), cos(key) (type: double), sin(key) (type: double), tan(key) (type: double), radians(key) (type: double)
+ expressions: NaN (type: double), NaN (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                  Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
@@ -126,10 +126,10 @@ STAGE PLANS:
              alias: decimal_udf2
              Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
              Filter Operator
- predicate: (key = CAST( 10 AS decimal(20,10))) (type: boolean)
+ predicate: (key = 10) (type: boolean)
                Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                Select Operator
- expressions: exp(key) (type: double), ln(key) (type: double), log(key) (type: double), log(key, key) (type: double), log(key, value) (type: double), log(value, key) (type: double), log10(key) (type: double), sqrt(key) (type: double)
+ expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                  Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator

Modified: hive/branches/llap/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out (original)
+++ hive/branches/llap/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out Wed Apr 1 01:15:50 2015
@@ -77,11 +77,11 @@ STAGE PLANS:
    Stage: Stage-9
      Map Reduce Local Work
        Alias -> Map Local Tables:
- $hdt$_0:$INTNAME1
+ $INTNAME1
            Fetch Operator
              limit: -1
        Alias -> Map Local Operator Tree:
- $hdt$_0:$INTNAME1
+ $INTNAME1
            TableScan
              HashTable Sink Operator
                keys:
@@ -114,11 +114,11 @@ STAGE PLANS:
    Stage: Stage-10
      Map Reduce Local Work
        Alias -> Map Local Tables:
- $hdt$_0:$INTNAME
+ $INTNAME
            Fetch Operator
              limit: -1
        Alias -> Map Local Operator Tree:
- $hdt$_0:$INTNAME
+ $INTNAME
            TableScan
              HashTable Sink Operator
                keys:
@@ -188,11 +188,11 @@ STAGE PLANS:
    Stage: Stage-11
      Map Reduce Local Work
        Alias -> Map Local Tables:
- $hdt$_0:$hdt$_1:$hdt$_1:lineitem
+ $hdt$_1:lineitem
            Fetch Operator
              limit: -1
        Alias -> Map Local Operator Tree:
- $hdt$_0:$hdt$_1:$hdt$_1:lineitem
+ $hdt$_1:lineitem
            TableScan
              alias: lineitem
              Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE
@@ -342,11 +342,11 @@ STAGE PLANS:
    Stage: Stage-9
      Map Reduce Local Work
        Alias -> Map Local Tables:
- $hdt$_0:$INTNAME1
+ $INTNAME1
            Fetch Operator
              limit: -1
        Alias -> Map Local Operator Tree:
- $hdt$_0:$INTNAME1
+ $INTNAME1
            TableScan
              HashTable Sink Operator
                keys:
@@ -379,11 +379,11 @@ STAGE PLANS:
    Stage: Stage-10
      Map Reduce Local Work
        Alias -> Map Local Tables:
- $hdt$_0:$INTNAME
+ $INTNAME
            Fetch Operator
              limit: -1
        Alias -> Map Local Operator Tree:
- $hdt$_0:$INTNAME
+ $INTNAME
            TableScan
              HashTable Sink Operator
                keys:
@@ -453,11 +453,11 @@ STAGE PLANS:
    Stage: Stage-11
      Map Reduce Local Work
        Alias -> Map Local Tables:
- $hdt$_0:$hdt$_1:$hdt$_1:lineitem
+ $hdt$_1:lineitem
            Fetch Operator
              limit: -1
        Alias -> Map Local Operator Tree:
- $hdt$_0:$hdt$_1:$hdt$_1:lineitem
+ $hdt$_1:lineitem
            TableScan
              alias: lineitem
              Statistics: Num rows: 100 Data size: 11999 Basic stats: COMPLETE Column stats: NONE

Modified: hive/branches/llap/ql/src/test/results/clientpositive/windowing_streaming.q.out
URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/windowing_streaming.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/ql/src/test/results/clientpositive/windowing_streaming.q.out (original)
+++ hive/branches/llap/ql/src/test/results/clientpositive/windowing_streaming.q.out Wed Apr 1 01:15:50 2015
@@ -65,43 +65,39 @@ STAGE PLANS:
            TableScan
              alias: part
              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: p_name (type: string), p_mfgr (type: string)
- outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string), _col0 (type: string)
- sort order: ++
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
        Reduce Operator Tree:
          Select Operator
            expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string)
- outputColumnNames: _col0, _col1
+ outputColumnNames: _col1, _col2
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            PTF Operator
              Function definitions:
                  Input definition
                    input alias: ptf_0
- output shape: _col0: string, _col1: string
+ output shape: _col1: string, _col2: string
                    type: WINDOWING
                  Windowing table definition
                    input alias: ptf_1
                    name: windowingtablefunction
- order by: _col0
- partition by: _col1
+ order by: _col1
+ partition by: _col2
                    raw input shape:
                    window functions:
                        window function definition
                          alias: _wcol0
- arguments: _col0
+ arguments: _col1
                          name: rank
                          window function: GenericUDAFRankEvaluator
                          window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                          isPivotResult: true
              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              Select Operator
- expressions: _col1 (type: string), _wcol0 (type: int)
+ expressions: _col2 (type: string), _wcol0 (type: int)
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
@@ -139,37 +135,33 @@ STAGE PLANS:
            TableScan
              alias: part
              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: p_name (type: string), p_mfgr (type: string)
- outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ key expressions: p_mfgr (type: string), p_name (type: string)
+ sort order: ++
+ Map-reduce partition columns: p_mfgr (type: string)
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col1 (type: string), _col0 (type: string)
- sort order: ++
- Map-reduce partition columns: _col1 (type: string)
- Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
- TopN Hash Memory Usage: 0.8
+ TopN Hash Memory Usage: 0.8
        Reduce Operator Tree:
          Select Operator
            expressions: KEY.reducesinkkey1 (type: string), KEY.reducesinkkey0 (type: string)
- outputColumnNames: _col0, _col1
+ outputColumnNames: _col1, _col2
            Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            PTF Operator
              Function definitions:
                  Input definition
                    input alias: ptf_0
- output shape: _col0: string, _col1: string
+ output shape: _col1: string, _col2: string
                    type: WINDOWING
                  Windowing table definition
                    input alias: ptf_1
                    name: windowingtablefunction
- order by: _col0
- partition by: _col1
+ order by: _col1
+ partition by: _col2
                    raw input shape:
                    window functions:
                        window function definition
                          alias: _wcol0
- arguments: _col0
+ arguments: _col1
                          name: rank
                          window function: GenericUDAFRankEvaluator
                          window frame: PRECEDING(MAX)~FOLLOWING(MAX)
@@ -179,7 +171,7 @@ STAGE PLANS:
                predicate: (_wcol0 < 4) (type: boolean)
                Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                Select Operator
- expressions: _col1 (type: string), _wcol0 (type: int)
+ expressions: _col2 (type: string), _wcol0 (type: int)
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
@@ -312,37 +304,33 @@ STAGE PLANS:
            TableScan
              alias: alltypesorc
              Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
- Select Operator
- expressions: ctinyint (type: tinyint), cdouble (type: double)
- outputColumnNames: _col0, _col1
+ Reduce Output Operator
+ key expressions: ctinyint (type: tinyint), cdouble (type: double)
+ sort order: ++
+ Map-reduce partition columns: ctinyint (type: tinyint)
                Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
- Reduce Output Operator
- key expressions: _col0 (type: tinyint), _col1 (type: double)
- sort order: ++
- Map-reduce partition columns: _col0 (type: tinyint)
- Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
- TopN Hash Memory Usage: 0.8
+ TopN Hash Memory Usage: 0.8
        Reduce Operator Tree:
          Select Operator
            expressions: KEY.reducesinkkey0 (type: tinyint), KEY.reducesinkkey1 (type: double)
- outputColumnNames: _col0, _col1
+ outputColumnNames: _col0, _col5
            Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
            PTF Operator
              Function definitions:
                  Input definition
                    input alias: ptf_0
- output shape: _col0: tinyint, _col1: double
+ output shape: _col0: tinyint, _col5: double
                    type: WINDOWING
                  Windowing table definition
                    input alias: ptf_1
                    name: windowingtablefunction
- order by: _col1
+ order by: _col5
                    partition by: _col0
                    raw input shape:
                    window functions:
                        window function definition
                          alias: _wcol0
- arguments: _col1
+ arguments: _col5
                          name: rank
                          window function: GenericUDAFRankEvaluator
                          window frame: PRECEDING(MAX)~FOLLOWING(MAX)
@@ -352,7 +340,7 @@ STAGE PLANS:
                predicate: (_wcol0 < 5) (type: boolean)
                Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                Select Operator
- expressions: _col0 (type: tinyint), _col1 (type: double), _wcol0 (type: int)
+ expressions: _col0 (type: tinyint), _col5 (type: double), _wcol0 (type: int)
                  outputColumnNames: _col0, _col1, _col2
                  Statistics: Num rows: 4096 Data size: 880654 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator

Modified: hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorGenerator.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorGenerator.java?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorGenerator.java (original)
+++ hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorGenerator.java Wed Apr 1 01:15:50 2015
@@ -43,13 +43,15 @@ import org.apache.hadoop.hive.serde2.typ
  public class AvroObjectInspectorGenerator {
    final private List<String> columnNames;
    final private List<TypeInfo> columnTypes;
+ final private List<String> columnComments;
    final private ObjectInspector oi;

    public AvroObjectInspectorGenerator(Schema schema) throws SerDeException {
      verifySchemaIsARecord(schema);

- this.columnNames = generateColumnNames(schema);
+ this.columnNames = AvroObjectInspectorGenerator.generateColumnNames(schema);
      this.columnTypes = SchemaToTypeInfo.generateColumnTypes(schema);
+ this.columnComments = AvroObjectInspectorGenerator.generateColumnComments(schema);
      assert columnNames.size() == columnTypes.size();
      this.oi = createObjectInspector();
    }
@@ -80,7 +82,7 @@ public class AvroObjectInspectorGenerato
      for(int i = 0; i < columnNames.size(); i++) {
        columnOIs.add(i, createObjectInspectorWorker(columnTypes.get(i)));
      }
- return ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, columnOIs);
+ return ObjectInspectorFactory.getStandardStructObjectInspector(columnNames, columnOIs, columnComments);
    }

    private ObjectInspector createObjectInspectorWorker(TypeInfo ti) throws SerDeException {
@@ -145,7 +147,7 @@ public class AvroObjectInspectorGenerato
             c.equals(ObjectInspector.Category.UNION);
    }

- private List<String> generateColumnNames(Schema schema) {
+ public static List<String> generateColumnNames(Schema schema) {
      List<Schema.Field> fields = schema.getFields();
      List<String> fieldsList = new ArrayList<String>(fields.size());

@@ -156,4 +158,15 @@ public class AvroObjectInspectorGenerato
      return fieldsList;
    }

+ public static List<String> generateColumnComments(Schema schema) {
+ List<Schema.Field> fields = schema.getFields();
+ List<String> fieldComments = new ArrayList<String>(fields.size());
+
+ for (Schema.Field field : fields) {
+ String fieldComment = field.doc() == null ? "" : field.doc();
+ fieldComments.add(fieldComment);
+ }
+
+ return fieldComments;
+ }
  }
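
The new generateColumnComments mirrors generateColumnNames: for every Avro field it returns field.doc(), falling back to "" so the comments list stays index-aligned with the column names that getStandardStructObjectInspector consumes. A minimal usage sketch of the two public static helpers; the demo class and the record literal below are invented for illustration, not taken from the patch:

    import java.util.List;
    import org.apache.avro.Schema;
    import org.apache.hadoop.hive.serde2.avro.AvroObjectInspectorGenerator;

    public class ColumnCommentsDemo {
      public static void main(String[] args) {
        // Illustrative schema: one field with a doc, one without.
        Schema schema = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"doctors\",\"fields\":["
          + "{\"name\":\"number\",\"type\":\"int\",\"doc\":\"Order of playing the role\"},"
          + "{\"name\":\"first_name\",\"type\":\"string\"}]}");

        List<String> names = AvroObjectInspectorGenerator.generateColumnNames(schema);
        List<String> comments = AvroObjectInspectorGenerator.generateColumnComments(schema);

        System.out.println(names);    // [number, first_name]
        System.out.println(comments); // [Order of playing the role, ] -- empty string, not null
      }
    }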

Modified: hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java (original)
+++ hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java Wed Apr 1 01:15:50 2015
@@ -133,7 +133,9 @@ public class AvroSerDe extends AbstractS
      if (columnCommentProperty == null || columnCommentProperty.isEmpty()) {
        columnComments = new ArrayList<String>();
      } else {
- columnComments = Arrays.asList(columnCommentProperty.split(","));
+ //Comments are separated by "\0" in columnCommentProperty, see method getSchema
+ //in MetaStoreUtils where this string columns.comments is generated
+ columnComments = Arrays.asList(columnCommentProperty.split("\0"));
        LOG.info("columnComments is " + columnCommentProperty);
      }
      if (columnNames.size() != columnTypes.size()) {
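
The delimiter switch matters because a free-text comment may itself contain commas, whereas the metastore (per the comment above, MetaStoreUtils.getSchema) joins the columns.comments value with '\0', which, unlike a comma, does not occur in ordinary comment text. A self-contained sketch with a made-up property value:

    import java.util.Arrays;
    import java.util.List;

    public class CommentSplitDemo {
      public static void main(String[] args) {
        // Hypothetical "columns.comments" value for three columns; the second
        // comment contains a comma.
        String prop = "initial date\0main actor, playing the Doctor\0episode title";

        List<String> byComma = Arrays.asList(prop.split(","));
        List<String> byNul = Arrays.asList(prop.split("\0"));

        System.out.println(byComma.size()); // 2 -- the embedded comma breaks alignment
        System.out.println(byNul.size());   // 3 -- one comment per column
      }
    }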

Modified: hive/branches/llap/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java (original)
+++ hive/branches/llap/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java Wed Apr 1 01:15:50 2015
@@ -37,6 +37,7 @@ import org.apache.hadoop.hive.metastore.
  import org.apache.hadoop.hive.metastore.api.MetaException;
  import org.apache.hadoop.hive.shims.HadoopShims.KerberosNameShim;
  import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.hive.thrift.DBTokenStore;
  import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge;
  import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode;
  import org.apache.hadoop.security.SecurityUtil;
@@ -110,9 +111,17 @@ public class HiveAuthFactory {
                          conf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL));
          // start delegation token manager
          try {
- HMSHandler baseHandler = new HiveMetaStore.HMSHandler(
- "new db based metaserver", conf, true);
- saslServer.startDelegationTokenSecretManager(conf, baseHandler.getMS(), ServerMode.HIVESERVER2);
+ // rawStore is only necessary for DBTokenStore
+ Object rawStore = null;
+ String tokenStoreClass = conf.getVar(HiveConf.ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS);
+
+ if (tokenStoreClass.equals(DBTokenStore.class.getName())) {
+ HMSHandler baseHandler = new HiveMetaStore.HMSHandler(
+ "new db based metaserver", conf, true);
+ rawStore = baseHandler.getMS();
+ }
+
+ saslServer.startDelegationTokenSecretManager(conf, rawStore, ServerMode.HIVESERVER2);
          }
          catch (MetaException|IOException e) {
            throw new TTransportException("Failed to start token manager", e);
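
The restructuring above defers HMSHandler construction: baseHandler.getMS() stands up a metastore RawStore, and per the added comment that handle is only needed when DBTokenStore is the configured token store. A generic sketch of the same construct-only-when-needed pattern, with invented names rather than the actual HiveServer2 code:

    public class LazyRawStoreDemo {
      // Hypothetical stand-ins for configurable token store implementations.
      static final String DB_STORE = "org.example.DBTokenStore";
      static final String MEM_STORE = "org.example.MemoryTokenStore";

      public static void main(String[] args) {
        String tokenStoreClass = MEM_STORE; // would come from configuration

        Object rawStore = null;
        if (tokenStoreClass.equals(DB_STORE)) {
          rawStore = openMetastore(); // expensive; skipped for non-DB stores
        }
        startTokenManager(rawStore);  // tolerates null when unused
      }

      static Object openMetastore() { return new Object(); } // placeholder
      static void startTokenManager(Object rawStore) { }     // placeholder
    }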

Modified: hive/branches/llap/shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java
URL: http://svn.apache.org/viewvc/hive/branches/llap/shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java?rev=1670534&r1=1670533&r2=1670534&view=diff
==============================================================================
--- hive/branches/llap/shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java (original)
+++ hive/branches/llap/shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java Wed Apr 1 01:15:50 2015
@@ -29,6 +29,9 @@ import org.apache.log4j.AppenderSkeleton
   *
   */
  public abstract class ShimLoader {
+ public static String HADOOP20SVERSIONNAME = "0.20S";
+ public static String HADOOP23VERSIONNAME = "0.23";
+
    private static HadoopShims hadoopShims;
    private static JettyShims jettyShims;
    private static AppenderSkeleton eventCounter;
@@ -42,8 +45,8 @@ public abstract class ShimLoader {
        new HashMap<String, String>();

    static {
- HADOOP_SHIM_CLASSES.put("0.20S", "org.apache.hadoop.hive.shims.Hadoop20SShims");
- HADOOP_SHIM_CLASSES.put("0.23", "org.apache.hadoop.hive.shims.Hadoop23Shims");
+ HADOOP_SHIM_CLASSES.put(HADOOP20SVERSIONNAME, "org.apache.hadoop.hive.shims.Hadoop20SShims");
+ HADOOP_SHIM_CLASSES.put(HADOOP23VERSIONNAME, "org.apache.hadoop.hive.shims.Hadoop23Shims");
    }

    /**
@@ -54,8 +57,8 @@ public abstract class ShimLoader {
        new HashMap<String, String>();

    static {
- JETTY_SHIM_CLASSES.put("0.20S", "org.apache.hadoop.hive.shims.Jetty20SShims");
- JETTY_SHIM_CLASSES.put("0.23", "org.apache.hadoop.hive.shims.Jetty23Shims");
+ JETTY_SHIM_CLASSES.put(HADOOP20SVERSIONNAME, "org.apache.hadoop.hive.shims.Jetty20SShims");
+ JETTY_SHIM_CLASSES.put(HADOOP23VERSIONNAME, "org.apache.hadoop.hive.shims.Jetty23Shims");
    }

    /**
@@ -65,8 +68,10 @@ public abstract class ShimLoader {
        new HashMap<String, String>();

    static {
- EVENT_COUNTER_SHIM_CLASSES.put("0.20S", "org.apache.hadoop.log.metrics.EventCounter");
- EVENT_COUNTER_SHIM_CLASSES.put("0.23", "org.apache.hadoop.log.metrics.EventCounter");
+ EVENT_COUNTER_SHIM_CLASSES.put(HADOOP20SVERSIONNAME, "org.apache.hadoop.log.metrics" +
+ ".EventCounter");
+ EVENT_COUNTER_SHIM_CLASSES.put(HADOOP23VERSIONNAME, "org.apache.hadoop.log.metrics" +
+ ".EventCounter");
    }

    /**
@@ -76,9 +81,9 @@ public abstract class ShimLoader {
        new HashMap<String, String>();

    static {
- HADOOP_THRIFT_AUTH_BRIDGE_CLASSES.put("0.20S",
+ HADOOP_THRIFT_AUTH_BRIDGE_CLASSES.put(HADOOP20SVERSIONNAME,
          "org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge");
- HADOOP_THRIFT_AUTH_BRIDGE_CLASSES.put("0.23",
+ HADOOP_THRIFT_AUTH_BRIDGE_CLASSES.put(HADOOP23VERSIONNAME,
          "org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge23");
    }

@@ -162,9 +167,9 @@ public abstract class ShimLoader {

      switch (Integer.parseInt(parts[0])) {
      case 1:
- return "0.20S";
+ return HADOOP20SVERSIONNAME;
      case 2:
- return "0.23";
+ return HADOOP23VERSIONNAME;
      default:
        throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers);
      }
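
Naming the version keys removes the repeated "0.20S"/"0.23" literals and lets the four maps and the version switch share one definition. A compilable sketch of the lookup those constants feed (the demo class and majorVersionKey are invented; the map contents and the switch mirror the patch):

    import java.util.HashMap;
    import java.util.Map;

    public class ShimLookupDemo {
      public static final String HADOOP20SVERSIONNAME = "0.20S";
      public static final String HADOOP23VERSIONNAME = "0.23";

      private static final Map<String, String> HADOOP_SHIM_CLASSES = new HashMap<String, String>();
      static {
        HADOOP_SHIM_CLASSES.put(HADOOP20SVERSIONNAME, "org.apache.hadoop.hive.shims.Hadoop20SShims");
        HADOOP_SHIM_CLASSES.put(HADOOP23VERSIONNAME, "org.apache.hadoop.hive.shims.Hadoop23Shims");
      }

      // Hadoop 1.x resolves to the 0.20S shims, 2.x to the 0.23 shims.
      static String majorVersionKey(String vers) {
        switch (Integer.parseInt(vers.split("\\.")[0])) {
        case 1:  return HADOOP20SVERSIONNAME;
        case 2:  return HADOOP23VERSIONNAME;
        default: throw new IllegalArgumentException("Unrecognized Hadoop major version number: " + vers);
        }
      }

      public static void main(String[] args) {
        // "2.6.0" -> org.apache.hadoop.hive.shims.Hadoop23Shims
        System.out.println(HADOOP_SHIM_CLASSES.get(majorVersionKey("2.6.0")));
      }
    }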

  • Prasanthj at Apr 1, 2015 at 1:15 am
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out Wed Apr 1 01:15:50 2015
    @@ -60,27 +60,27 @@ POSTHOOK: Output: default@episodes_parti
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=5
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=6
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=9
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
      PREHOOK: query: ALTER TABLE episodes_partitioned
      SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'
      WITH

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_literal.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_literal.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_literal.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_literal.q.out Wed Apr 1 01:15:50 2015
    @@ -70,20 +70,20 @@ PREHOOK: Input: default@avro1
      POSTHOOK: query: DESCRIBE avro1
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro1
    -string1 string from deserializer
    -int1 int from deserializer
    -tinyint1 int from deserializer
    -smallint1 int from deserializer
    -bigint1 bigint from deserializer
    -boolean1 boolean from deserializer
    -float1 float from deserializer
    -double1 double from deserializer
    -list1 array<string> from deserializer
    -map1 map<string,int> from deserializer
    -struct1 struct<sint:int,sboolean:boolean,sstring:string> from deserializer
    -union1 uniontype<float,boolean,string> from deserializer
    -enum1 string from deserializer
    -nullableint int from deserializer
    -bytes1 binary from deserializer
    -fixed1 binary from deserializer
    -dec1 decimal(5,2) from deserializer
    +string1 string
    +int1 int
    +tinyint1 int
    +smallint1 int
    +bigint1 bigint
    +boolean1 boolean
    +float1 float
    +double1 double
    +list1 array<string>
    +map1 map<string,int>
    +struct1 struct<sint:int,sboolean:boolean,sstring:string>
    +union1 uniontype<float,boolean,string>
    +enum1 string
    +nullableint int
    +bytes1 binary
    +fixed1 binary
    +dec1 decimal(5,2)

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/combine2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/combine2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/combine2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/combine2.q.out Wed Apr 1 01:15:50 2015
    @@ -564,14 +564,14 @@ STAGE PLANS:
                    name: default.combine2
                  name: default.combine2
            Truncated Path -> Alias:
    - /combine2/value=2010-04-21 09%3A45%3A00 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=val_0 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=val_2 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=val_4 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=val_5 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=val_8 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=val_9 [$hdt$_0:$hdt$_0:combine2]
    - /combine2/value=| [$hdt$_0:$hdt$_0:combine2]
    + /combine2/value=2010-04-21 09%3A45%3A00 [$hdt$_0:combine2]
    + /combine2/value=val_0 [$hdt$_0:combine2]
    + /combine2/value=val_2 [$hdt$_0:combine2]
    + /combine2/value=val_4 [$hdt$_0:combine2]
    + /combine2/value=val_5 [$hdt$_0:combine2]
    + /combine2/value=val_8 [$hdt$_0:combine2]
    + /combine2/value=val_9 [$hdt$_0:combine2]
    + /combine2/value=| [$hdt$_0:combine2]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer1.q.out Wed Apr 1 01:15:50 2015
    @@ -329,11 +329,11 @@ STAGE PLANS:
        Stage: Stage-6
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_0:$hdt$_1:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_0:$hdt$_1:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer12.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer12.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer12.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer12.q.out Wed Apr 1 01:15:50 2015
    @@ -27,16 +27,12 @@ STAGE PLANS:
                TableScan
                  alias: x
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: key (type: string)
    + sort order: +
    + Map-reduce partition columns: key (type: string)
                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    + value expressions: value (type: string)
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    @@ -116,16 +112,12 @@ STAGE PLANS:
                TableScan
                  alias: y
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: key (type: string)
    + sort order: +
    + Map-reduce partition columns: key (type: string)
                    Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    + value expressions: value (type: string)
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer3.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer3.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer3.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer3.q.out Wed Apr 1 01:15:50 2015
    @@ -504,14 +504,14 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_0:$hdt$_1:x
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_0:$hdt$_1:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -526,7 +526,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1208,14 +1208,14 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_0:$hdt$_1:x
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_0:$hdt$_1:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1230,7 +1230,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/create_like.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/create_like.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/create_like.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/create_like.q.out Wed Apr 1 01:15:50 2015
    @@ -331,9 +331,9 @@ POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors
      # col_name data_type comment

    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role

      # Detailed Table Information
      Database: default
    @@ -380,9 +380,9 @@ POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors2
      # col_name data_type comment

    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role

      # Detailed Table Information
      Database: default

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/ctas_colname.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/ctas_colname.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/ctas_colname.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/ctas_colname.q.out Wed Apr 1 01:15:50 2015
    @@ -174,15 +174,11 @@ STAGE PLANS:
                TableScan
                  alias: src1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: key (type: string), value (type: string)
    + sort order: ++
    + Map-reduce partition columns: key (type: string)
                    Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)
    @@ -340,15 +336,11 @@ STAGE PLANS:
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: key (type: string), value (type: string)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: key (type: string), value (type: string)
    + sort order: ++
    + Map-reduce partition columns: key (type: string)
                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string)

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf.q.out Wed Apr 1 01:15:50 2015
    @@ -649,7 +649,7 @@ STAGE PLANS:
                alias: decimal_udf
                Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
    - predicate: ((key * CAST( value AS decimal(10,0))) > CAST( 0 AS decimal(31,10))) (type: boolean)
    + predicate: ((key * CAST( value AS decimal(10,0))) > 0) (type: boolean)
                  Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: key (type: decimal(20,10)), value (type: int)
    @@ -907,7 +907,7 @@ STAGE PLANS:
                alias: decimal_udf
                Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                Select Operator
    - expressions: (key / CAST( 0 AS decimal(10,0))) (type: decimal(22,12))
    + expressions: (key / 0) (type: decimal(22,12))
                  outputColumnNames: _col0
                  Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                  Limit
    @@ -973,7 +973,7 @@ STAGE PLANS:
                alias: decimal_udf
                Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                Filter Operator
    - predicate: (key <> CAST( 0 AS decimal(20,10))) (type: boolean)
    + predicate: (key <> 0) (type: boolean)
                  Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: (key / key) (type: decimal(38,24))
    @@ -1808,7 +1808,7 @@ STAGE PLANS:
                alias: decimal_udf
                Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                Select Operator
    - expressions: ((key + CAST( 1 AS decimal(10,0))) % (key / CAST( 2 AS decimal(10,0)))) (type: decimal(22,12))
    + expressions: ((key + 1) % (key / 2)) (type: decimal(22,12))
                  outputColumnNames: _col0
                  Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                  ListSink

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf2.q.out Wed Apr 1 01:15:50 2015
    @@ -44,10 +44,10 @@ STAGE PLANS:
                  alias: decimal_udf2
                  Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (key = CAST( 10 AS decimal(20,10))) (type: boolean)
    + predicate: (key = 10) (type: boolean)
                    Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: acos(key) (type: double), asin(key) (type: double), atan(key) (type: double), cos(key) (type: double), sin(key) (type: double), tan(key) (type: double), radians(key) (type: double)
    + expressions: NaN (type: double), NaN (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                      Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
    @@ -101,10 +101,10 @@ STAGE PLANS:
                  alias: decimal_udf2
                  Statistics: Num rows: 3 Data size: 359 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (key = CAST( 10 AS decimal(20,10))) (type: boolean)
    + predicate: (key = 10) (type: boolean)
                    Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: exp(key) (type: double), ln(key) (type: double), log(key) (type: double), log(key, key) (type: double), log(key, value) (type: double), log(value, key) (type: double), log10(key) (type: double), sqrt(key) (type: double)
    + expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                      Statistics: Num rows: 1 Data size: 119 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out Wed Apr 1 01:15:50 2015
    @@ -338,7 +338,7 @@ STAGE PLANS:
                    name: default.src
                  name: default.src
            Truncated Path -> Alias:
    - /src [$hdt$_0:src]
    + /src [src]
            Needs Tagging: false
            Reduce Operator Tree:
              Select Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/explain_logical.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/explain_logical.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/explain_logical.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/explain_logical.q.out Wed Apr 1 01:15:50 2015
    @@ -100,7 +100,7 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:$hdt$_0:srcpart
    +$hdt$_0:srcpart
        TableScan (TS_0)
          alias: srcpart
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    @@ -108,25 +108,25 @@ $hdt$_0:$hdt$_0:srcpart
            expressions: key (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator (GBY_5)
    + Group By Operator (GBY_4)
              aggregations: count(1)
              keys: _col0 (type: string)
              mode: hash
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_6)
    + Reduce Output Operator (RS_5)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: bigint)
    - Group By Operator (GBY_7)
    + Group By Operator (GBY_6)
                  aggregations: count(VALUE._col0)
                  keys: KEY._col0 (type: string)
                  mode: mergepartial
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_9)
    + File Output Operator (FS_8)
                    compressed: false
                    Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                    table:
    @@ -180,7 +180,7 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:$hdt$_0:src
    +$hdt$_0:src
        TableScan (TS_0)
          alias: src
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -188,25 +188,25 @@ $hdt$_0:$hdt$_0:src
            expressions: key (type: string)
            outputColumnNames: _col0
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Group By Operator (GBY_4)
    + Group By Operator (GBY_3)
              aggregations: count(1)
              keys: _col0 (type: string)
              mode: hash
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_5)
    + Reduce Output Operator (RS_4)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: bigint)
    - Group By Operator (GBY_6)
    + Group By Operator (GBY_5)
                  aggregations: count(VALUE._col0)
                  keys: KEY._col0 (type: string)
                  mode: mergepartial
                  outputColumnNames: _col0, _col1
                  Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_8)
    + File Output Operator (FS_7)
                    compressed: false
                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    table:
    @@ -286,16 +286,16 @@ null-subquery1:$hdt$_0-subquery1:src
            expressions: key (type: string), value (type: string)
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Union (UNION_6)
    + Union (UNION_5)
              Statistics: Num rows: 2500 Data size: 26560 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_8)
    + File Output Operator (FS_7)
                compressed: false
                Statistics: Num rows: 2500 Data size: 26560 Basic stats: COMPLETE Column stats: NONE
                table:
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -null-subquery2:$hdt$_0-subquery2:$hdt$_0:srcpart
    +null-subquery2:$hdt$_0-subquery2:srcpart
        TableScan (TS_2)
          alias: srcpart
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    @@ -303,7 +303,7 @@ null-subquery2:$hdt$_0-subquery2:$hdt$_0
            expressions: key (type: string), value (type: string)
            outputColumnNames: _col0, _col1
            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - Union (UNION_6)
    + Union (UNION_5)
              Statistics: Num rows: 2500 Data size: 26560 Basic stats: COMPLETE Column stats: NONE

      PREHOOK: query: EXPLAIN LOGICAL
    @@ -357,11 +357,11 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:$hdt$_0:s2
    +$hdt$_0:s2
        TableScan (TS_0)
          alias: s2
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_13)
    + Filter Operator (FIL_12)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
            Select Operator (SEL_2)
    @@ -386,18 +386,18 @@ $hdt$_0:$hdt$_0:s2
                    expressions: _col3 (type: string), _col1 (type: string)
                    outputColumnNames: _col0, _col1
                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_12)
    + File Output Operator (FS_11)
                      compressed: false
                      Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -$hdt$_0:$hdt$_1:s1
    +$hdt$_1:s1
        TableScan (TS_3)
          alias: s1
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_14)
    + Filter Operator (FIL_13)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator (SEL_4)
    @@ -472,7 +472,7 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:srcpart
    +srcpart
        TableScan (TS_0)
          alias: srcpart
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    @@ -480,7 +480,7 @@ $hdt$_0:srcpart
            expressions: ds (type: string), key (type: string), value (type: string)
            outputColumnNames: _col0, _col1, _col2
            Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - ListSink (OP_6)
    + ListSink (OP_5)

      PREHOOK: query: EXPLAIN LOGICAL SELECT * FROM V3
      PREHOOK: type: QUERY
    @@ -503,23 +503,23 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:$hdt$_0:srcpart
    +$hdt$_0:srcpart
        TableScan (TS_0)
          alias: srcpart
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_13)
    + Filter Operator (FIL_12)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
            Select Operator (SEL_2)
              expressions: key (type: string)
              outputColumnNames: _col0
              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_7)
    + Reduce Output Operator (RS_6)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
    - Join Operator (JOIN_10)
    + Join Operator (JOIN_9)
                  condition map:
                       Inner Join 0 to 1
                  keys:
    @@ -527,11 +527,11 @@ $hdt$_0:$hdt$_0:srcpart
                    1 _col0 (type: string)
                  outputColumnNames: _col0, _col2
                  Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
    - Select Operator (SEL_11)
    + Select Operator (SEL_10)
                    expressions: _col0 (type: string), _col2 (type: string)
                    outputColumnNames: _col0, _col1
                    Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_12)
    + File Output Operator (FS_11)
                      compressed: false
                      Statistics: Num rows: 1100 Data size: 11686 Basic stats: COMPLETE Column stats: NONE
                      table:
    @@ -539,23 +539,23 @@ $hdt$_0:$hdt$_0:srcpart
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
      $hdt$_1:src2
    - TableScan (TS_4)
    + TableScan (TS_3)
          alias: src2
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_14)
    + Filter Operator (FIL_13)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Select Operator (SEL_5)
    + Select Operator (SEL_4)
              expressions: key (type: string), value (type: string)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_9)
    + Reduce Output Operator (RS_8)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: string)
    - Join Operator (JOIN_10)
    + Join Operator (JOIN_9)
                  condition map:
                       Inner Join 0 to 1
                  keys:
    @@ -585,24 +585,24 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:$hdt$_0:$hdt$_0:srcpart
    +$hdt$_0:srcpart
        TableScan (TS_0)
          alias: srcpart
          Statistics: Num rows: 2000 Data size: 21248 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_18)
    + Filter Operator (FIL_16)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
            Select Operator (SEL_2)
              expressions: key (type: string), value (type: string)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_9)
    + Reduce Output Operator (RS_8)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 1000 Data size: 10624 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: string)
    - Join Operator (JOIN_14)
    + Join Operator (JOIN_13)
                  condition map:
                       Inner Join 0 to 1
                       Inner Join 0 to 2
    @@ -612,34 +612,34 @@ $hdt$_0:$hdt$_0:$hdt$_0:srcpart
                    2 _col0 (type: string)
                  outputColumnNames: _col1, _col2, _col4
                  Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
    - Select Operator (SEL_15)
    + Select Operator (SEL_14)
                    expressions: _col2 (type: string), _col1 (type: string), _col4 (type: string)
                    outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_17)
    + File Output Operator (FS_15)
                      compressed: false
                      Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                          serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -$hdt$_0:$hdt$_1:src
    - TableScan (TS_4)
    +$hdt$_1:src
    + TableScan (TS_3)
          alias: src
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_19)
    + Filter Operator (FIL_17)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Select Operator (SEL_5)
    + Select Operator (SEL_4)
              expressions: key (type: string)
              outputColumnNames: _col0
              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_11)
    + Reduce Output Operator (RS_10)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Join Operator (JOIN_14)
    + Join Operator (JOIN_13)
                  condition map:
                       Inner Join 0 to 1
                       Inner Join 0 to 2
    @@ -649,24 +649,24 @@ $hdt$_0:$hdt$_1:src
                    2 _col0 (type: string)
                  outputColumnNames: _col1, _col2, _col4
                  Statistics: Num rows: 2200 Data size: 23372 Basic stats: COMPLETE Column stats: NONE
    -$hdt$_0:$hdt$_2:src
    - TableScan (TS_6)
    +$hdt$_2:src
    + TableScan (TS_5)
          alias: src
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_20)
    + Filter Operator (FIL_18)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Select Operator (SEL_7)
    + Select Operator (SEL_6)
              expressions: key (type: string), value (type: string)
              outputColumnNames: _col0, _col1
              Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_13)
    + Reduce Output Operator (RS_12)
                key expressions: _col0 (type: string)
                sort order: +
                Map-reduce partition columns: _col0 (type: string)
                Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                value expressions: _col1 (type: string)
    - Join Operator (JOIN_14)
    + Join Operator (JOIN_13)
                  condition map:
                       Inner Join 0 to 1
                       Inner Join 0 to 2
    @@ -800,11 +800,11 @@ TOK_QUERY


      LOGICAL PLAN:
    -$hdt$_0:$hdt$_0:src
    +$hdt$_0:src
        TableScan (TS_0)
          alias: src
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_19)
    + Filter Operator (FIL_18)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator (SEL_1)
    @@ -829,27 +829,27 @@ $hdt$_0:$hdt$_0:src
                    expressions: _col0 (type: string), _col3 (type: bigint), _col1 (type: string)
                    outputColumnNames: _col0, _col1, _col2
                    Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator (RS_16)
    + Reduce Output Operator (RS_15)
                      key expressions: _col0 (type: string)
                      sort order: +
                      Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: bigint), _col2 (type: string)
    - Select Operator (SEL_17)
    + Select Operator (SEL_16)
                        expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: bigint), VALUE._col1 (type: string)
                        outputColumnNames: _col0, _col1, _col2
                        Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator (FS_18)
    + File Output Operator (FS_17)
                          compressed: false
                          Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
                          table:
                              input format: org.apache.hadoop.mapred.TextInputFormat
                              output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                              serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -$hdt$_0:$hdt$_1:$hdt$_1:src
    +$hdt$_1:$hdt$_1:src
        TableScan (TS_2)
          alias: src
          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Filter Operator (FIL_20)
    + Filter Operator (FIL_19)
            predicate: key is not null (type: boolean)
            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
            Select Operator (SEL_3)

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr.q.out Wed Apr 1 01:15:50 2015
    @@ -222,8 +222,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out Wed Apr 1 01:15:50 2015
    @@ -239,8 +239,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr.q.out Wed Apr 1 01:15:50 2015
    @@ -215,8 +215,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out Wed Apr 1 01:15:50 2015
    @@ -228,8 +228,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:src]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:src]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:src]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
  • Prasanthj at Apr 1, 2015 at 1:15 am
    Author: prasanthj
    Date: Wed Apr 1 01:15:50 2015
    New Revision: 1670534

    URL: http://svn.apache.org/r1670534
    Log:
    Merge from trunk to llap (3/31/2015) (Prasanth Jayachandran)

    Added:
         hive/branches/llap/conf/ivysettings.xml
           - copied unchanged from r1670533, hive/trunk/conf/ivysettings.xml
         hive/branches/llap/data/files/HiveGroup.parquet
           - copied unchanged from r1670533, hive/trunk/data/files/HiveGroup.parquet
         hive/branches/llap/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
           - copied unchanged from r1670533, hive/trunk/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/session/DependencyResolver.java
           - copied unchanged from r1670533, hive/trunk/ql/src/java/org/apache/hadoop/hive/ql/session/DependencyResolver.java
         hive/branches/llap/ql/src/test/org/apache/hadoop/hive/ql/session/TestAddResource.java
           - copied unchanged from r1670533, hive/trunk/ql/src/test/org/apache/hadoop/hive/ql/session/TestAddResource.java
         hive/branches/llap/ql/src/test/queries/clientnegative/ivyDownload.q
           - copied unchanged from r1670533, hive/trunk/ql/src/test/queries/clientnegative/ivyDownload.q
         hive/branches/llap/ql/src/test/queries/clientpositive/avro_comments.q
           - copied unchanged from r1670533, hive/trunk/ql/src/test/queries/clientpositive/avro_comments.q
         hive/branches/llap/ql/src/test/queries/clientpositive/ivyDownload.q
           - copied unchanged from r1670533, hive/trunk/ql/src/test/queries/clientpositive/ivyDownload.q
         hive/branches/llap/ql/src/test/queries/clientpositive/lateral_view_onview.q
           - copied unchanged from r1670533, hive/trunk/ql/src/test/queries/clientpositive/lateral_view_onview.q
         hive/branches/llap/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
           - copied unchanged from r1670533, hive/trunk/ql/src/test/queries/clientpositive/parquet_schema_evolution.q
         hive/branches/llap/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q
           - copied unchanged from r1670533, hive/trunk/ql/src/test/queries/clientpositive/parquet_table_with_subschema.q
         hive/branches/llap/ql/src/test/results/clientnegative/ivyDownload.q.out
           - copied unchanged from r1670533, hive/trunk/ql/src/test/results/clientnegative/ivyDownload.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_comments.q.out
           - copied unchanged from r1670533, hive/trunk/ql/src/test/results/clientpositive/avro_comments.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/ivyDownload.q.out
           - copied unchanged from r1670533, hive/trunk/ql/src/test/results/clientpositive/ivyDownload.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/lateral_view_onview.q.out
           - copied unchanged from r1670533, hive/trunk/ql/src/test/results/clientpositive/lateral_view_onview.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out
           - copied unchanged from r1670533, hive/trunk/ql/src/test/results/clientpositive/parquet_schema_evolution.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/parquet_table_with_subschema.q.out
           - copied unchanged from r1670533, hive/trunk/ql/src/test/results/clientpositive/parquet_table_with_subschema.q.out
    Modified:
         hive/branches/llap/ (props changed)
         hive/branches/llap/beeline/src/java/org/apache/hive/beeline/BeeLine.java
         hive/branches/llap/hcatalog/hcatalog-pig-adapter/pom.xml
         hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml
         hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh
         hive/branches/llap/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TempletonDelegator.java
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
         hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TrivialExecService.java
         hive/branches/llap/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHiveAuthFactory.java
         hive/branches/llap/itests/pom.xml
         hive/branches/llap/packaging/src/main/assembly/bin.xml
         hive/branches/llap/pom.xml
         hive/branches/llap/ql/pom.xml
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
         hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFormatNumber.java
         hive/branches/llap/ql/src/test/queries/clientpositive/avro_compression_enabled.q
         hive/branches/llap/ql/src/test/queries/clientpositive/avro_evolved_schemas.q
         hive/branches/llap/ql/src/test/queries/clientpositive/avro_joins.q
         hive/branches/llap/ql/src/test/queries/clientpositive/leadlag.q
         hive/branches/llap/ql/src/test/queries/clientpositive/parquet_columnar.q
         hive/branches/llap/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
         hive/branches/llap/ql/src/test/queries/clientpositive/udf_format_number.q
         hive/branches/llap/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/annotate_stats_select.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join10.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join11.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join12.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join13.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join14.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join22.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join26.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join_nulls.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column3.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_change_schema.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_compression_enabled.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_joins.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_joins_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_sanity_test.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_evolution_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/avro_schema_literal.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/combine2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer12.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/correlationoptimizer3.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/create_like.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/ctas_colname.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/decimal_udf2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/encrypted/encryption_insert_partition_dynamic.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/explain_logical.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_map_ppr_multi_distinct.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_ppr_multi_distinct.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_6.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/index_serde.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/input_part1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join28.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join29.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join31.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join32.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join32_lessSize.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join33.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/join35.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/leadlag.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/literal_decimal.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/load_dyn_part14.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/louter_join_ppr.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_subquery.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/multiMapJoin2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/parquet_columnar.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/pcr.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/ppd_vc.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/skewjoinopt10.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
         hive/branches/llap/ql/src/test/results/clientpositive/subquery_in.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/subquery_in_having.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/subquery_notin.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/subquery_views.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/table_access_keys_stats.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/tez/subquery_in.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/udf7.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/udf_format_number.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/udf_reflect2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/union24.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/unionDistinct_1.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_udf2.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/vector_mapjoin_reduce.q.out
         hive/branches/llap/ql/src/test/results/clientpositive/windowing_streaming.q.out
         hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroObjectInspectorGenerator.java
         hive/branches/llap/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerDe.java
         hive/branches/llap/service/src/java/org/apache/hive/service/auth/HiveAuthFactory.java
         hive/branches/llap/shims/common/src/main/java/org/apache/hadoop/hive/shims/ShimLoader.java

    Propchange: hive/branches/llap/
    ------------------------------------------------------------------------------
    --- svn:mergeinfo (original)
    +++ svn:mergeinfo Wed Apr 1 01:15:50 2015
    @@ -4,4 +4,4 @@
      /hive/branches/spark:1608589-1660298
      /hive/branches/tez:1494760-1622766
      /hive/branches/vectorization:1466908-1527856
    -/hive/trunk:1624170-1669495
    +/hive/trunk:1624170-1670533

    Modified: hive/branches/llap/beeline/src/java/org/apache/hive/beeline/BeeLine.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/beeline/src/java/org/apache/hive/beeline/BeeLine.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/beeline/src/java/org/apache/hive/beeline/BeeLine.java (original)
    +++ hive/branches/llap/beeline/src/java/org/apache/hive/beeline/BeeLine.java Wed Apr 1 01:15:50 2015
    @@ -803,10 +803,14 @@ public class BeeLine implements Closeabl
        }

        private int execute(ConsoleReader reader, boolean exitOnError) {
    + String line;
          while (!exit) {
            try {
              // Execute one instruction; terminate on executing a script if there is an error
    - if (!dispatch(reader.readLine(getPrompt())) && exitOnError) {
    + // in silent mode, prevent the query and prompt from being echoed back to the terminal
    + line = getOpts().isSilent() ? reader.readLine(null, ConsoleReader.NULL_MASK) : reader.readLine(getPrompt());
    +
    + if (!dispatch(line) && exitOnError) {
                return ERRNO_OTHER;
              }
            } catch (Throwable t) {
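
      The change above routes silent-mode reads through jline's masked readLine so that neither the prompt nor the typed query is echoed. A minimal, self-contained sketch of the same pattern (assumptions: jline2's ConsoleReader on the classpath, as the patch itself uses; a local `silent` flag standing in for BeeLineOpts.isSilent()):

          import jline.console.ConsoleReader;

          public class SilentReadSketch {
            public static void main(String[] args) throws Exception {
              ConsoleReader reader = new ConsoleReader();
              boolean silent = args.length > 0 && "--silent".equals(args[0]); // stand-in for getOpts().isSilent()
              // A null prompt plus NULL_MASK makes jline print nothing and suppress
              // echo, which is what the patch relies on in silent mode.
              String line = silent
                  ? reader.readLine(null, ConsoleReader.NULL_MASK)
                  : reader.readLine("sketch> ");
              System.out.println("read: " + line);
            }
          }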

    Modified: hive/branches/llap/hcatalog/hcatalog-pig-adapter/pom.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/hcatalog-pig-adapter/pom.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/hcatalog-pig-adapter/pom.xml (original)
    +++ hive/branches/llap/hcatalog/hcatalog-pig-adapter/pom.xml Wed Apr 1 01:15:50 2015
    @@ -68,7 +68,6 @@
          </dependency>
        </dependencies>

    -
        <profiles>
          <profile>
            <id>hadoop-1</id>
    @@ -79,6 +78,12 @@
                <version>${hadoop-20S.version}</version>
              </dependency>
              <dependency>
    + <groupId>org.apache.hadoop</groupId>
    + <artifactId>hadoop-test</artifactId>
    + <version>${hadoop-20S.version}</version>
    + <scope>test</scope>
    + </dependency>
    + <dependency>
                <groupId>org.apache.pig</groupId>
                <artifactId>pig</artifactId>
                <version>${pig.version}</version>
    @@ -102,6 +107,11 @@
              </dependency>
              <dependency>
                <groupId>org.apache.hadoop</groupId>
    + <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
    + <version>${hadoop-23.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-mapreduce-client-core</artifactId>
                <version>${hadoop-23.version}</version>
              </dependency>
    @@ -112,6 +122,12 @@
                <classifier>h2</classifier>
              </dependency>
              <dependency>
    + <groupId>org.apache.hadoop</groupId>
    + <artifactId>hadoop-hdfs</artifactId>
    + <version>${hadoop-23.version}</version>
    + <scope>test</scope>
    + </dependency>
    + <dependency>
                <!--this should be automatically brought in by Pig, it's not in Pig 0.12 due to a bug
                    in Pig which requires it This is fixed in Pig's pom file in ASF trunk (pig 13)-->
                <groupId>joda-time</groupId>
    @@ -121,11 +137,30 @@
              <!-- Test dependencies -->
              <dependency>
                <groupId>org.apache.hadoop</groupId>
    + <artifactId>hadoop-hdfs</artifactId>
    + <version>${hadoop-23.version}</version>
    + <classifier>tests</classifier>
    + <scope>test</scope>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-mapreduce-client-common</artifactId>
                <version>${hadoop-23.version}</version>
                <optional>true</optional>
                <scope>test</scope>
              </dependency>
    + <dependency>
    + <groupId>org.apache.hadoop</groupId>
    + <artifactId>hadoop-common</artifactId>
    + <version>${hadoop-23.version}</version>
    + <classifier>tests</classifier>
    + <scope>test</scope>
    + </dependency>
    + <dependency>
    + <groupId>com.sun.jersey</groupId>
    + <artifactId>jersey-servlet</artifactId>
    + <scope>test</scope>
    + </dependency>
            </dependencies>
          </profile>
        </profiles>

    Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml (original)
    +++ hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/config/webhcat/webhcat-site.xml Wed Apr 1 01:15:50 2015
    @@ -35,7 +35,7 @@

          <property>
              <name>templeton.libjars</name>
    - <value>${env.TEMPLETON_HOME}/../lib/zookeeper-3.4.5.jar</value>
    + <value>${env.TEMPLETON_HOME}/../lib/zookeeper-3.4.6.jar,${env.TEMPLETON_HOME}/../lib/hive-common-1.2.0-SNAPSHOT.jar</value>
              <description>Jars to add to the classpath.</description>
          </property>

    @@ -69,6 +69,11 @@
                  shipped to the target node in the cluster to execute Pig job which uses
                  HCat, Hive query, etc.</description>
          </property>
    +
    + <property>
    + <name>templeton.hive.extra.files</name>
    + <value>${env.TEZ_CLIENT_HOME}/conf/tez-site.xml,${env.TEZ_CLIENT_HOME}/,${env.TEZ_CLIENT_HOME}/lib</value>
    + </property>
          <property>
              <name>templeton.hcat.home</name>
              <value>apache-hive-${env.HIVE_VERSION}-bin.tar.gz/apache-hive-${env.HIVE_VERSION}-bin/hcatalog</value>
    @@ -101,7 +106,7 @@
          </property>

          <property>
    - <!--\,thrift://127.0.0.1:9933-->
    + <!--\,thrift://127.0.0.1:9933,,hive.execution.engine=tez-->
              <name>templeton.hive.properties</name>
              <value>hive.metastore.uris=thrift://localhost:9933,hive.metastore.sasl.enabled=false</value>
          </property>

    Modified: hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh (original)
    +++ hive/branches/llap/hcatalog/src/test/e2e/templeton/deployers/env.sh Wed Apr 1 01:15:50 2015
    @@ -36,6 +36,10 @@ if [ -z ${PIG_VERSION} ]; then
        export PIG_VERSION=0.12.2-SNAPSHOT
      fi

    +if [ -z ${TEZ_VERSION} ]; then
    + export TEZ_VERSION=0.5.3
    +fi
    +
      #Root of project source tree
      if [ -z ${PROJ_HOME} ]; then
        export PROJ_HOME=/Users/${USER}/dev/hive
    @@ -46,6 +50,7 @@ if [ -z ${HADOOP_HOME} ]; then
        export HADOOP_HOME=/Users/${USER}/dev/hwxhadoop/hadoop-dist/target/hadoop-${HADOOP_VERSION}
      fi

    +export TEZ_CLIENT_HOME=/Users/ekoifman/dev/apache-tez-client-${TEZ_VERSION}
      #Make sure Pig is built for the Hadoop version you are running
      export PIG_TAR_PATH=/Users/${USER}/dev/pig-${PIG_VERSION}-src/build
      #this is part of Pig distribution

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/config/webhcat-default.xml Wed Apr 1 01:15:50 2015
    @@ -39,7 +39,7 @@

        <property>
          <name>templeton.libjars</name>
    - <value>${env.TEMPLETON_HOME}/share/webhcat/svr/lib/zookeeper-3.4.3.jar</value>
    + <value>${env.TEMPLETON_HOME}/../lib/zookeeper-3.4.6.jar,${env.TEMPLETON_HOME}/../lib/hive-common-1.2.0-SNAPSHOT.jar</value>
          <description>Jars to add to the classpath.</description>
        </property>

    @@ -106,7 +106,20 @@
        <property>
          <name>templeton.hive.path</name>
          <value>hive-0.11.0.tar.gz/hive-0.11.0/bin/hive</value>
    - <description>The path to the Hive executable.</description>
    + <description>The path to the Hive executable. Applies only if templeton.hive.archive is defined.</description>
    + </property>
    +
    + <property>
    + <name>templeton.hive.extra.files</name>
    + <value>/tez-client/conf/tez-site.xml,/tez-client/,/tez-client/lib</value>
    + <description>The resources in this list will be localized to the node running LaunchMapper and added to HADOOP_CLASSPATH
    + before launching the 'hive' command. If the path /foo/bar is a directory, the contents of the entire dir will be localized
    + and ./bar/* will be added to HADOOP_CLASSPATH. Note that since classpath processing does not recurse into subdirectories,
    + the paths in this property may be overlapping. In the example above, "./tez-site.xml:./tez-client/*:./lib/*" will be added to
    + HADOOP_CLASSPATH.
    + This can be used to specify config files, Tez artifacts, etc. The list is passed via the -files option of the hadoop jar command, so
    + each path is interpreted by the Generic Options Parser. Each path may be a local or an HDFS path.
    + </description>
        </property>

        <property>
    @@ -197,6 +210,32 @@
          </description>
        </property>

    + <!--
    + <property>
    + <name>templeton.controller.mr.am.java.opts</name>
    + <value></value>
    + <description>Java options to be set for the templeton controller job's
    + MapReduce application master. When submitting the controller job,
    + Templeton will override yarn.app.mapreduce.am.command-opts with
    + this value. If this is not specified, Templeton will not set the
    + property and therefore the value will be picked up from
    + mapred-site.xml.
    + </description>
    + </property>
    +
    + <property>
    + <name>templeton.mr.am.memory.mb</name>
    + <value></value>
    + <description>Templeton controller job's Application Master's memory
    + limit in MB. When submitting controller job, Templeton will
    + overwrite yarn.app.mapreduce.am.resource.mb with this value. If
    + empty, Templeton will not set yarn.app.mapreduce.am.resource.mb
    + when submitting the controller job, therefore the configuration
    + in mapred-site.xml will be used.
    + </description>
    + </property>
    + -->
    +
        <property>
          <name>templeton.exec.envs</name>
          <value>HADOOP_PREFIX,HADOOP_HOME,JAVA_HOME,HIVE_HOME</value>
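
      To make the directory rule in the templeton.hive.extra.files description above concrete, here is a minimal sketch (hypothetical class, not part of the patch) of the expansion it describes: a plain file entry goes onto HADOOP_CLASSPATH as-is, while a directory entry gets a trailing wildcard, since classpath processing does not recurse:

          import java.io.File;

          public class ClasspathExtrasSketch {
            // "file" stays "file"; a directory "dir" becomes "dir/*" so the
            // jars inside it are picked up.
            static String toHadoopClasspath(String commaList) {
              StringBuilder cp = new StringBuilder();
              for (String entry : commaList.split(",")) {
                if (cp.length() > 0) {
                  cp.append(File.pathSeparator);
                }
                cp.append(entry);
                if (new File(entry).isDirectory()) {
                  cp.append(File.separator).append('*');
                }
              }
              return cp.toString();
            }

            public static void main(String[] args) {
              // With ./tez-client and ./lib existing as directories, this prints
              // ./tez-site.xml:./tez-client/*:./lib/* on Unix, matching the example above.
              System.out.println(toHadoopClasspath("./tez-site.xml,./tez-client,./lib"));
            }
          }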

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java Wed Apr 1 01:15:50 2015
    @@ -35,7 +35,6 @@ import org.apache.commons.logging.LogFac
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.conf.HiveConf;
    -import org.apache.hadoop.hive.conf.SystemVariables;
      import org.apache.hadoop.util.StringUtils;
      import org.apache.hadoop.util.VersionInfo;
      import org.apache.hive.hcatalog.templeton.tool.JobState;
    @@ -104,6 +103,8 @@ public class AppConfig extends Configura
        public static final String HIVE_ARCHIVE_NAME = "templeton.hive.archive";
        public static final String HIVE_PATH_NAME = "templeton.hive.path";
        public static final String MAPPER_MEMORY_MB = "templeton.mapper.memory.mb";
    + public static final String MR_AM_MEMORY_MB = "templeton.mr.am.memory.mb";
    +
        /**
         * see webhcat-default.xml
         */
    @@ -130,6 +131,8 @@ public class AppConfig extends Configura
        public static final String OVERRIDE_JARS_ENABLED = "templeton.override.enabled";
        public static final String TEMPLETON_CONTROLLER_MR_CHILD_OPTS
          = "templeton.controller.mr.child.opts";
    + public static final String TEMPLETON_CONTROLLER_MR_AM_JAVA_OPTS
    + = "templeton.controller.mr.am.java.opts";

        public static final String KERBEROS_SECRET = "templeton.kerberos.secret";
        public static final String KERBEROS_PRINCIPAL = "templeton.kerberos.principal";
    @@ -148,7 +151,14 @@ public class AppConfig extends Configura
          = "mapred.map.tasks.speculative.execution";
        public static final String HADOOP_CHILD_JAVA_OPTS = "mapred.child.java.opts";
        public static final String HADOOP_MAP_MEMORY_MB = "mapreduce.map.memory.mb";
    + public static final String HADOOP_MR_AM_JAVA_OPTS = "yarn.app.mapreduce.am.command-opts";
    + public static final String HADOOP_MR_AM_MEMORY_MB = "yarn.app.mapreduce.am.resource.mb";
        public static final String UNIT_TEST_MODE = "templeton.unit.test.mode";
    + /**
    + * Comma-separated list of artifacts to add to the HADOOP_CLASSPATH env var in
    + * LaunchMapper before launching the Hive command.
    + */
    + public static final String HIVE_EXTRA_FILES = "templeton.hive.extra.files";


        private static final Log LOG = LogFactory.getLog(AppConfig.class);
    @@ -313,7 +323,13 @@ public class AppConfig extends Configura
        public String controllerMRChildOpts() {
          return get(TEMPLETON_CONTROLLER_MR_CHILD_OPTS);
        }
    + public String controllerAMChildOpts() {
    + return get(TEMPLETON_CONTROLLER_MR_AM_JAVA_OPTS);
    + }
        public String mapperMemoryMb() { return get(MAPPER_MEMORY_MB); }
    + public String amMemoryMb() {
    + return get(MR_AM_MEMORY_MB);
    + }

        /**
         * @see #HIVE_PROPS_NAME
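
      Per the webhcat-default.xml descriptions above, the new MR_AM_MEMORY_MB and TEMPLETON_CONTROLLER_MR_AM_JAVA_OPTS knobs override the controller job's AM settings when set and otherwise leave mapred-site.xml in charge. A minimal sketch of that submission-side wiring (hypothetical helper; the actual hook in TempletonControllerJob is not shown in this hunk):

          import org.apache.hadoop.conf.Configuration;

          public class AmOverrideSketch {
            // Copy the templeton.* values, when present, onto the YARN properties
            // named by HADOOP_MR_AM_JAVA_OPTS / HADOOP_MR_AM_MEMORY_MB above;
            // when unset, the mapred-site.xml defaults remain in effect.
            static void applyAmOverrides(Configuration jobConf, String amOpts, String amMemoryMb) {
              if (amOpts != null && !amOpts.isEmpty()) {
                jobConf.set("yarn.app.mapreduce.am.command-opts", amOpts);
              }
              if (amMemoryMb != null && !amMemoryMb.isEmpty()) {
                jobConf.set("yarn.app.mapreduce.am.resource.mb", amMemoryMb);
              }
            }

            public static void main(String[] args) {
              Configuration conf = new Configuration(false);
              applyAmOverrides(conf, "-Xmx512m", "1024");
              System.out.println(conf.get("yarn.app.mapreduce.am.resource.mb")); // 1024
            }
          }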

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/HiveDelegator.java Wed Apr 1 01:15:50 2015
    @@ -27,6 +27,7 @@ import java.util.List;
      import java.util.Map;

      import org.apache.commons.exec.ExecuteException;
    +import org.apache.hadoop.fs.Path;
      import org.apache.hive.hcatalog.templeton.tool.JobSubmissionConstants;
      import org.apache.hive.hcatalog.templeton.tool.TempletonControllerJob;
      import org.apache.hive.hcatalog.templeton.tool.TempletonUtils;
    @@ -117,7 +118,7 @@ public class HiveDelegator extends Launc
        private List<String> makeBasicArgs(String execute, String srcFile, String otherFiles,
                                               String statusdir, String completedUrl,
                                               boolean enablelog)
    - throws URISyntaxException, FileNotFoundException, IOException,
    + throws URISyntaxException, IOException,
          InterruptedException
        {
          ArrayList<String> args = new ArrayList<String>();
    @@ -142,6 +143,30 @@ public class HiveDelegator extends Launc
            args.add(appConf.hiveArchive());
          }

    + //ship additional artifacts, for example for Tez
    + String extras = appConf.get(AppConfig.HIVE_EXTRA_FILES);
    + if(extras != null && extras.length() > 0) {
    + boolean foundFiles = false;
    + for(int i = 0; i < args.size(); i++) {
    + if(FILES.equals(args.get(i))) {
    + String value = args.get(i + 1);
    + args.set(i + 1, value + "," + extras);
    + foundFiles = true;
    + }
    + }
    + if(!foundFiles) {
    + args.add(FILES);
    + args.add(extras);
    + }
    + String[] extraFiles = appConf.getStrings(AppConfig.HIVE_EXTRA_FILES);
    + StringBuilder extraFileNames = new StringBuilder();
    + //now tell LaunchMapper which files it should add to HADOOP_CLASSPATH
    + for(String file : extraFiles) {
    + Path p = new Path(file);
    + extraFileNames.append(p.getName()).append(",");
    + }
    + addDef(args, JobSubmissionConstants.HADOOP_CLASSPATH_EXTRAS, extraFileNames.toString());
    + }
          return args;
        }
      }
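
      A standalone sketch (assumes hadoop-common on the classpath) of the name-mangling step in makeBasicArgs above: -files ships the full local or HDFS paths, but inside the launched container each artifact appears under its base name, so LaunchMapper is handed only Path.getName() of every entry:

          import org.apache.hadoop.fs.Path;

          public class ExtraFileNamesSketch {
            public static void main(String[] args) {
              String[] extraFiles = {"/tez-client/conf/tez-site.xml", "/tez-client/lib"};
              StringBuilder extraFileNames = new StringBuilder();
              for (String file : extraFiles) {
                // Base name only: that is the name each artifact is localized under.
                extraFileNames.append(new Path(file).getName()).append(',');
              }
              // The patch passes this as templeton.hadoop.classpath.extras; the
              // trailing comma is harmless since Configuration.getStrings tokenizes
              // on commas and drops empty tokens.
              System.out.println(extraFileNames); // tez-site.xml,lib,
            }
          }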

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TempletonDelegator.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TempletonDelegator.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TempletonDelegator.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/TempletonDelegator.java Wed Apr 1 01:15:50 2015
    @@ -28,6 +28,10 @@ public class TempletonDelegator {
         * http://hadoop.apache.org/docs/r1.0.4/commands_manual.html#Generic+Options
         */
        public static final String ARCHIVES = "-archives";
    + /**
    + * http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options
    + */
    + public static final String FILES = "-files";

        protected AppConfig appConf;


    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/JobSubmissionConstants.java Wed Apr 1 01:15:50 2015
    @@ -31,6 +31,12 @@ public interface JobSubmissionConstants
        public static final String EXIT_FNAME = "exit";
        public static final int WATCHER_TIMEOUT_SECS = 10;
        public static final int KEEP_ALIVE_MSEC = 60 * 1000;
    + /**
    + * A comma-separated list of files to be added to HADOOP_CLASSPATH in
    + * {@link org.apache.hive.hcatalog.templeton.tool.LaunchMapper}. Used to localize additional
    + * artifacts for job submission requests.
    + */
    + public static final String HADOOP_CLASSPATH_EXTRAS = "templeton.hadoop.classpath.extras";
        /*
         * The = sign in the string for TOKEN_FILE_ARG_PLACEHOLDER is required because
         * org.apache.hadoop.util.GenericOptionsParser.preProcessForWindows() prepares

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/LaunchMapper.java Wed Apr 1 01:15:50 2015
    @@ -21,6 +21,7 @@ package org.apache.hive.hcatalog.templet
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.conf.Configuration;
    +import org.apache.hadoop.fs.FileStatus;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.common.classification.InterfaceAudience;
    @@ -33,7 +34,6 @@ import org.apache.hadoop.mapreduce.Mappe
      import org.apache.hadoop.security.UserGroupInformation;
      import org.apache.hadoop.util.Shell;
      import org.apache.hadoop.util.StringUtils;
    -import org.apache.hive.hcatalog.templeton.AppConfig;
      import org.apache.hive.hcatalog.templeton.BadParam;
      import org.apache.hive.hcatalog.templeton.LauncherDelegator;

    @@ -115,6 +115,32 @@ public class LaunchMapper extends Mapper
            }
          }
        }
    + private static void handleHadoopClasspathExtras(Configuration conf, Map<String, String> env)
    + throws IOException {
    + if(!TempletonUtils.isset(conf.get(JobSubmissionConstants.HADOOP_CLASSPATH_EXTRAS))) {
    + return;
    + }
    + LOG.debug(HADOOP_CLASSPATH_EXTRAS + "=" + conf.get(HADOOP_CLASSPATH_EXTRAS));
    + String[] files = conf.getStrings(HADOOP_CLASSPATH_EXTRAS);
    + StringBuilder paths = new StringBuilder();
    + FileSystem fs = FileSystem.getLocal(conf);//these have been localized already
    + for(String f : files) {
    + Path p = new Path(f);
    + FileStatus fileStatus = fs.getFileStatus(p);
    + paths.append(f);
    + if(fileStatus.isDirectory()) {
    + paths.append(File.separator).append("*");
    + }
    + paths.append(File.pathSeparator);
    + }
    + paths.setLength(paths.length() - 1);
    + if(TempletonUtils.isset(System.getenv("HADOOP_CLASSPATH"))) {
    + env.put("HADOOP_CLASSPATH", System.getenv("HADOOP_CLASSPATH") + File.pathSeparator + paths);
    + }
    + else {
    + env.put("HADOOP_CLASSPATH", paths.toString());
    + }
    + }
        protected Process startJob(Context context, String user, String overrideClasspath)
          throws IOException, InterruptedException {
          Configuration conf = context.getConfiguration();
    @@ -135,6 +161,7 @@ public class LaunchMapper extends Mapper
          Map<String, String> env = TempletonUtils.hadoopUserEnv(user, overrideClasspath);
          handlePigEnvVars(conf, env);
          handleSqoop(conf, env);
    + handleHadoopClasspathExtras(conf, env);
          List<String> jarArgsList = new LinkedList<String>(Arrays.asList(jarArgs));
          handleTokenFile(jarArgsList, JobSubmissionConstants.TOKEN_FILE_ARG_PLACEHOLDER, "mapreduce.job.credentials.binary");
          handleTokenFile(jarArgsList, JobSubmissionConstants.TOKEN_FILE_ARG_PLACEHOLDER_TEZ, "tez.credentials.path");
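
handleHadoopClasspathExtras builds a classpath fragment from already-localized files, expanding a directory entry to dir/*. A minimal standalone sketch of that joining, using java.io.File in place of the Hadoop local FileSystem:

    import java.io.File;

    public class ClasspathExtrasSketch {
      // Joins localized file names into a classpath fragment; a directory entry
      // becomes dir/*. Assumes at least one entry, as the caller in LaunchMapper
      // checks isset(...) before calling.
      static String buildClasspath(String existing, String[] localizedFiles) {
        StringBuilder paths = new StringBuilder();
        for (String f : localizedFiles) {
          paths.append(f);
          if (new File(f).isDirectory()) { // stand-in for fs.getFileStatus(p).isDirectory()
            paths.append(File.separator).append('*');
          }
          paths.append(File.pathSeparator);
        }
        paths.setLength(paths.length() - 1); // drop the trailing separator
        return (existing == null || existing.isEmpty())
            ? paths.toString()
            : existing + File.pathSeparator + paths;
      }

      public static void main(String[] unused) {
        System.out.println(buildClasspath("app.jar", new String[] { "conf-dir", "extra.jar" }));
      }
    }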

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TempletonControllerJob.java Wed Apr 1 01:15:50 2015
    @@ -18,9 +18,7 @@
       */
      package org.apache.hive.hcatalog.templeton.tool;

    -import java.io.File;
      import java.io.IOException;
    -import java.net.URI;
      import java.security.PrivilegedExceptionAction;
      import java.util.Arrays;

    @@ -28,13 +26,9 @@ import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.conf.Configured;
    -import org.apache.hadoop.fs.FileSystem;
    -import org.apache.hadoop.fs.Path;
    -import org.apache.hadoop.fs.permission.FsPermission;
      import org.apache.hadoop.hive.common.classification.InterfaceAudience;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    -import org.apache.hadoop.hive.shims.ShimLoader;
      import org.apache.hadoop.io.NullWritable;
      import org.apache.hadoop.io.Text;
      import org.apache.hadoop.mapred.JobClient;
    @@ -47,7 +41,6 @@ import org.apache.hadoop.security.UserGr
      import org.apache.hadoop.security.token.Token;
      import org.apache.hadoop.util.Tool;
      import org.apache.hive.hcatalog.templeton.AppConfig;
    -import org.apache.hive.hcatalog.templeton.Main;
      import org.apache.hive.hcatalog.templeton.SecureProxySupport;
      import org.apache.hive.hcatalog.templeton.UgiFactory;
      import org.apache.thrift.TException;
    @@ -114,6 +107,15 @@ public class TempletonControllerJob exte
          if(memoryMb != null && memoryMb.length() != 0) {
            conf.set(AppConfig.HADOOP_MAP_MEMORY_MB, memoryMb);
          }
    + String amMemoryMB = appConf.amMemoryMb();
    + if (amMemoryMB != null && !amMemoryMB.isEmpty()) {
    + conf.set(AppConfig.HADOOP_MR_AM_MEMORY_MB, amMemoryMB);
    + }
    + String amJavaOpts = appConf.controllerAMChildOpts();
    + if (amJavaOpts != null && !amJavaOpts.isEmpty()) {
    + conf.set(AppConfig.HADOOP_MR_AM_JAVA_OPTS, amJavaOpts);
    + }
    +
          String user = UserGroupInformation.getCurrentUser().getShortUserName();
          conf.set("user.name", user);
          Job job = new Job(conf);
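
The two new settings forward AM sizing from WebHCat configuration into the controller job. A minimal sketch; the property names below are assumptions about what AppConfig.HADOOP_MR_AM_MEMORY_MB and HADOOP_MR_AM_JAVA_OPTS resolve to, and are not confirmed by this patch:

    import org.apache.hadoop.conf.Configuration;

    public class AmConfSketch {
      public static void main(String[] unused) {
        Configuration conf = new Configuration(false);
        String amMemoryMb = "2048";      // stands in for appConf.amMemoryMb()
        String amJavaOpts = "-Xmx1638m"; // stands in for appConf.controllerAMChildOpts()
        if (amMemoryMb != null && !amMemoryMb.isEmpty()) {
          conf.set("yarn.app.mapreduce.am.resource.mb", amMemoryMb);  // assumed target property
        }
        if (amJavaOpts != null && !amJavaOpts.isEmpty()) {
          conf.set("yarn.app.mapreduce.am.command-opts", amJavaOpts); // assumed target property
        }
        System.out.println(conf.get("yarn.app.mapreduce.am.resource.mb"));
      }
    }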

    Modified: hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TrivialExecService.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TrivialExecService.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TrivialExecService.java (original)
    +++ hive/branches/llap/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/tool/TrivialExecService.java Wed Apr 1 01:15:50 2015
    @@ -76,32 +76,31 @@ final class TrivialExecService {
          }
        }
        /**
    - * Print files and directories in current directory. Will list files in the sub-directory (only 1 level deep)
    - * time honored tradition in WebHCat of borrowing from Oozie
+ * Print files and directories in the given {@code dir}.
         */
    - private static void printContentsOfDir(String dir) {
    + private static StringBuilder printContentsOfDir(String dir, int depth, StringBuilder sb) {
    + StringBuilder indent = new StringBuilder();
    + for(int i = 0; i < depth; i++) {
    + indent.append("--");
    + }
          File folder = new File(dir);
    - StringBuilder sb = new StringBuilder("Files in '").append(dir).append("' dir:").append(folder.getAbsolutePath()).append('\n');
    + sb.append(indent).append("Files in '").append(dir).append("' dir:").append(folder.getAbsolutePath()).append('\n');

          File[] listOfFiles = folder.listFiles();
    + if(listOfFiles == null) {
    + return sb;
    + }
          for (File fileName : listOfFiles) {
            if (fileName.isFile()) {
    - sb.append("File: ").append(fileName.getName()).append('\n');
    + sb.append(indent).append("File: ").append(fileName.getName()).append('\n');
            }
            else if (fileName.isDirectory()) {
    - sb.append("Dir: ").append(fileName.getName()).append('\n');
    - File subDir = new File(fileName.getName());
    - File[] moreFiles = subDir.listFiles();
    - for (File subFileName : moreFiles) {
    - if (subFileName.isFile()) {
    - sb.append("--File: ").append(subFileName.getName()).append('\n');
    - }
    - else if (subFileName.isDirectory()) {
    - sb.append("--Dir: ").append(subFileName.getName()).append('\n');
    - }
    - }
    + printContentsOfDir(fileName.getName(), depth+1, sb);
            }
          }
    - LOG.info(sb.toString());
    + return sb;
    + }
    + private static void printContentsOfDir(String dir) {
    + LOG.info(printContentsOfDir(dir, 0, new StringBuilder()).toString());
        }
      }
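
The refactored printContentsOfDir recurses with "--" indentation per level. A standalone sketch of the same traversal; as a design note, this version recurses on the File itself (full path), whereas the patch passes fileName.getName(), which resolves children against the working directory:

    import java.io.File;

    public class DirTreeSketch {
      // Same traversal as above, indenting "--" per level; recursion passes the
      // File so child paths stay anchored to the parent directory.
      static StringBuilder list(File dir, int depth, StringBuilder sb) {
        StringBuilder indent = new StringBuilder();
        for (int i = 0; i < depth; i++) {
          indent.append("--");
        }
        sb.append(indent).append("Files in '").append(dir).append("':\n");
        File[] files = dir.listFiles();
        if (files == null) { // not a directory, or an I/O error
          return sb;
        }
        for (File f : files) {
          if (f.isFile()) {
            sb.append(indent).append("File: ").append(f.getName()).append('\n');
          } else if (f.isDirectory()) {
            list(f, depth + 1, sb);
          }
        }
        return sb;
      }

      public static void main(String[] unused) {
        System.out.println(list(new File("."), 0, new StringBuilder()));
      }
    }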

    Modified: hive/branches/llap/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHiveAuthFactory.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHiveAuthFactory.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHiveAuthFactory.java (original)
    +++ hive/branches/llap/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHiveAuthFactory.java Wed Apr 1 01:15:50 2015
    @@ -41,26 +41,48 @@ public class TestHiveAuthFactory {
        }

        /**
    - * Verify that delegation token manager is started with no exception
+ * Verify that the delegation token manager starts without exception for MemoryTokenStore
         * @throws Exception
         */
        @Test
    - public void testStartTokenManager() throws Exception {
    + public void testStartTokenManagerForMemoryTokenStore() throws Exception {
          hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, HiveAuthFactory.AuthTypes.KERBEROS.getAuthName());
          String principalName = miniHiveKdc.getFullHiveServicePrincipal();
          System.out.println("Principal: " + principalName);
    -
    +
    + hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL, principalName);
    + String keyTabFile = miniHiveKdc.getKeyTabFile(miniHiveKdc.getHiveServicePrincipal());
    + System.out.println("keyTabFile: " + keyTabFile);
    + Assert.assertNotNull(keyTabFile);
    + hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB, keyTabFile);
    +
    + HiveAuthFactory authFactory = new HiveAuthFactory(hiveConf);
    + Assert.assertNotNull(authFactory);
    + Assert.assertEquals("org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory",
    + authFactory.getAuthTransFactory().getClass().getName());
    + }
    +
    + /**
+ * Verify that the delegation token manager starts without exception for DBTokenStore
    + * @throws Exception
    + */
    + @Test
    + public void testStartTokenManagerForDBTokenStore() throws Exception {
    + hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, HiveAuthFactory.AuthTypes.KERBEROS.getAuthName());
    + String principalName = miniHiveKdc.getFullHiveServicePrincipal();
    + System.out.println("Principal: " + principalName);
    +
          hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL, principalName);
          String keyTabFile = miniHiveKdc.getKeyTabFile(miniHiveKdc.getHiveServicePrincipal());
          System.out.println("keyTabFile: " + keyTabFile);
          Assert.assertNotNull(keyTabFile);
          hiveConf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB, keyTabFile);

    - System.out.println("rawStoreClassName =" + hiveConf.getVar(ConfVars.METASTORE_RAW_STORE_IMPL));
    + hiveConf.setVar(ConfVars.METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS, "org.apache.hadoop.hive.thrift.DBTokenStore");

          HiveAuthFactory authFactory = new HiveAuthFactory(hiveConf);
          Assert.assertNotNull(authFactory);
    - Assert.assertEquals("org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory",
    + Assert.assertEquals("org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge$Server$TUGIAssumingTransportFactory",
              authFactory.getAuthTransFactory().getClass().getName());
        }
      }

    Modified: hive/branches/llap/itests/pom.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/itests/pom.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/itests/pom.xml (original)
    +++ hive/branches/llap/itests/pom.xml Wed Apr 1 01:15:50 2015
    @@ -93,6 +93,9 @@
                        mkdir -p $DOWNLOAD_DIR
                        download "http://d3jw87u4immizc.cloudfront.net/spark-tarball/spark-${spark.version}-bin-hadoop2-without-hive.tgz" "spark"
                        cp -f $HIVE_ROOT/data/conf/spark/log4j.properties $BASE_DIR/spark/conf/
    + sed '/package /d' ${basedir}/${hive.path.to.root}/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java > /tmp/UDFExampleAdd.java
    + javac -cp ${settings.localRepository}/org/apache/hive/hive-exec/${project.version}/hive-exec-${project.version}.jar /tmp/UDFExampleAdd.java -d /tmp
    + jar -cf /tmp/udfexampleadd-1.0.jar -C /tmp UDFExampleAdd.class
                      </echo>
                    </target>
                  </configuration>

    Modified: hive/branches/llap/packaging/src/main/assembly/bin.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/packaging/src/main/assembly/bin.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/packaging/src/main/assembly/bin.xml (original)
    +++ hive/branches/llap/packaging/src/main/assembly/bin.xml Wed Apr 1 01:15:50 2015
    @@ -165,6 +165,7 @@
            <directory>${project.parent.basedir}/conf</directory>
            <includes>
              <include>*.template</include>
    + <include>ivysettings.xml</include>
            </includes>
            <outputDirectory>conf</outputDirectory>
          </fileSet>

    Modified: hive/branches/llap/pom.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/pom.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/pom.xml (original)
    +++ hive/branches/llap/pom.xml Wed Apr 1 01:15:50 2015
    @@ -128,6 +128,7 @@
          <!-- httpcomponents are not always in version sync -->
          <httpcomponents.client.version>4.2.5</httpcomponents.client.version>
          <httpcomponents.core.version>4.2.5</httpcomponents.core.version>
    + <ivy.version>2.4.0</ivy.version>
          <jackson.version>1.9.2</jackson.version>
          <javaewah.version>0.3.2</javaewah.version>
          <javolution.version>5.5.1</javolution.version>
    @@ -149,7 +150,7 @@
          <mockito-all.version>1.9.5</mockito-all.version>
          <mina.version>2.0.0-M5</mina.version>
          <netty.version>4.0.23.Final</netty.version>
    - <parquet.version>1.6.0rc3</parquet.version>
    + <parquet.version>1.6.0rc6</parquet.version>
          <pig.version>0.12.0</pig.version>
          <protobuf.version>2.5.0</protobuf.version>
          <stax.version>1.0.1</stax.version>

    Modified: hive/branches/llap/ql/pom.xml
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/pom.xml?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/pom.xml (original)
    +++ hive/branches/llap/ql/pom.xml Wed Apr 1 01:15:50 2015
    @@ -168,6 +168,11 @@
            <version>${libfb303.version}</version>
          </dependency>
          <dependency>
    + <groupId>org.apache.ivy</groupId>
    + <artifactId>ivy</artifactId>
    + <version>${ivy.version}</version>
    + </dependency>
    + <dependency>
            <groupId>org.apache.thrift</groupId>
            <artifactId>libthrift</artifactId>
            <version>${libthrift.version}</version>

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java Wed Apr 1 01:15:50 2015
    @@ -18,7 +18,6 @@ import org.apache.hadoop.io.ArrayWritabl
      import parquet.io.api.GroupConverter;
      import parquet.io.api.RecordMaterializer;
      import parquet.schema.GroupType;
    -import parquet.schema.MessageType;
      import parquet.schema.MessageTypeParser;

      import java.util.Map;
    @@ -34,7 +33,7 @@ public class DataWritableRecordConverter

        public DataWritableRecordConverter(final GroupType requestedSchema, final Map<String, String> metadata) {
          this.root = new HiveStructConverter(requestedSchema,
    - MessageTypeParser.parseMessageType(metadata.get(DataWritableReadSupport.HIVE_SCHEMA_KEY)), metadata);
    + MessageTypeParser.parseMessageType(metadata.get(DataWritableReadSupport.HIVE_TABLE_AS_PARQUET_SCHEMA)), metadata);
        }

        @Override

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java Wed Apr 1 01:15:50 2015
    @@ -16,6 +16,7 @@ package org.apache.hadoop.hive.ql.io.par
      import java.util.ArrayList;
      import java.util.HashMap;
      import java.util.List;
    +import java.util.ListIterator;
      import java.util.Map;

      import org.apache.hadoop.conf.Configuration;
    @@ -24,17 +25,21 @@ import org.apache.hadoop.hive.ql.io.IOCo
      import org.apache.hadoop.hive.ql.io.parquet.convert.DataWritableRecordConverter;
      import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
      import org.apache.hadoop.hive.serde2.ColumnProjectionUtils;
    +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
    +import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
    +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
    +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
      import org.apache.hadoop.io.ArrayWritable;
      import org.apache.hadoop.util.StringUtils;

    -import parquet.column.ColumnDescriptor;
    +import parquet.hadoop.api.InitContext;
      import parquet.hadoop.api.ReadSupport;
      import parquet.io.api.RecordMaterializer;
    +import parquet.schema.GroupType;
      import parquet.schema.MessageType;
    -import parquet.schema.PrimitiveType;
    -import parquet.schema.PrimitiveType.PrimitiveTypeName;
      import parquet.schema.Type;
    -import parquet.schema.Type.Repetition;
    +import parquet.schema.Types;
    +import parquet.schema.PrimitiveType.PrimitiveTypeName;

      /**
       *
    @@ -45,8 +50,7 @@ import parquet.schema.Type.Repetition;
       */
      public class DataWritableReadSupport extends ReadSupport<ArrayWritable> {

    - private static final String TABLE_SCHEMA = "table_schema";
    - public static final String HIVE_SCHEMA_KEY = "HIVE_TABLE_SCHEMA";
    + public static final String HIVE_TABLE_AS_PARQUET_SCHEMA = "HIVE_TABLE_SCHEMA";
        public static final String PARQUET_COLUMN_INDEX_ACCESS = "parquet.column.index.access";

        /**
    @@ -56,80 +60,176 @@ public class DataWritableReadSupport ext
         * @param columns comma separated list of columns
         * @return list with virtual columns removed
         */
    - private static List<String> getColumns(final String columns) {
    + private static List<String> getColumnNames(final String columns) {
          return (List<String>) VirtualColumn.
              removeVirtualColumns(StringUtils.getStringCollection(columns));
        }

        /**
+ * Returns a list of TypeInfo objects from a string that contains
+ * comma-separated column type names.
         *
    - * It creates the readContext for Parquet side with the requested schema during the init phase.
    + * @param types Comma separated list of types
    + * @return A list of TypeInfo objects.
    + */
    + private static List<TypeInfo> getColumnTypes(final String types) {
    + return TypeInfoUtils.getTypeInfosFromTypeString(types);
    + }
    +
    + /**
+ * Searches for a field name in a Parquet GroupType, ignoring case.
+ * GroupType#getType(String fieldName) is case sensitive, so we use this method.
         *
    - * @param configuration needed to get the wanted columns
    - * @param keyValueMetaData // unused
    - * @param fileSchema parquet file schema
    - * @return the parquet ReadContext
+ * @param groupType Group of field types in which to search for fieldName
+ * @param fieldName The field we are searching for
+ * @return The Type object of the field if found; null otherwise.
         */
    - @Override
    - public parquet.hadoop.api.ReadSupport.ReadContext init(final Configuration configuration,
    - final Map<String, String> keyValueMetaData, final MessageType fileSchema) {
    - final String columns = configuration.get(IOConstants.COLUMNS);
    - final Map<String, String> contextMetadata = new HashMap<String, String>();
    - final boolean indexAccess = configuration.getBoolean(PARQUET_COLUMN_INDEX_ACCESS, false);
    - if (columns != null) {
    - final List<String> listColumns = getColumns(columns);
    - final Map<String, String> lowerCaseFileSchemaColumns = new HashMap<String,String>();
    - for (ColumnDescriptor c : fileSchema.getColumns()) {
    - lowerCaseFileSchemaColumns.put(c.getPath()[0].toLowerCase(), c.getPath()[0]);
    + private static Type getFieldTypeIgnoreCase(GroupType groupType, String fieldName) {
    + for (Type type : groupType.getFields()) {
    + if (type.getName().equalsIgnoreCase(fieldName)) {
    + return type;
            }
    - final List<Type> typeListTable = new ArrayList<Type>();
    - if(indexAccess) {
    - for (int index = 0; index < listColumns.size(); index++) {
    - //Take columns based on index or pad the field
    - if(index < fileSchema.getFieldCount()) {
    - typeListTable.add(fileSchema.getType(index));
    - } else {
    - //prefixing with '_mask_' to ensure no conflict with named
    - //columns in the file schema
    - typeListTable.add(new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.BINARY, "_mask_"+listColumns.get(index)));
    + }
    +
    + return null;
    + }
    +
    + /**
+ * Searches for column names in a given Parquet schema and returns their
+ * corresponding Parquet schema types.
+ *
+ * @param schema Group schema in which to search for column names.
+ * @param colNames List of column names.
+ * @param colTypes List of column types.
+ * @return List of Type objects for the projected columns.
    + */
    + private static List<Type> getProjectedGroupFields(GroupType schema, List<String> colNames, List<TypeInfo> colTypes) {
    + List<Type> schemaTypes = new ArrayList<Type>();
    +
    + ListIterator columnIterator = colNames.listIterator();
    + while (columnIterator.hasNext()) {
    + TypeInfo colType = colTypes.get(columnIterator.nextIndex());
    + String colName = (String) columnIterator.next();
    +
    + Type fieldType = getFieldTypeIgnoreCase(schema, colName);
    + if (fieldType != null) {
    + if (colType.getCategory() == ObjectInspector.Category.STRUCT) {
    + if (fieldType.isPrimitive()) {
    + throw new IllegalStateException("Invalid schema data type, found: PRIMITIVE, expected: STRUCT");
                }
    +
    + GroupType groupFieldType = fieldType.asGroupType();
    +
    + List<Type> groupFields = getProjectedGroupFields(
    + groupFieldType,
    + ((StructTypeInfo) colType).getAllStructFieldNames(),
    + ((StructTypeInfo) colType).getAllStructFieldTypeInfos()
    + );
    +
    + Type[] typesArray = groupFields.toArray(new Type[0]);
    + schemaTypes.add(Types.buildGroup(groupFieldType.getRepetition())
    + .addFields(typesArray)
    + .named(fieldType.getName())
    + );
    + } else {
    + schemaTypes.add(fieldType);
              }
            } else {
    - for (String col : listColumns) {
    - col = col.toLowerCase();
    - // listColumns contains partition columns which are metadata only
    - if (lowerCaseFileSchemaColumns.containsKey(col)) {
    - typeListTable.add(fileSchema.getType(lowerCaseFileSchemaColumns.get(col)));
    - } else {
    - // below allows schema evolution
    - typeListTable.add(new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.BINARY, col));
    - }
    - }
    + // Add type for schema evolution
    + schemaTypes.add(Types.optional(PrimitiveTypeName.BINARY).named(colName));
            }
    - MessageType tableSchema = new MessageType(TABLE_SCHEMA, typeListTable);
    - contextMetadata.put(HIVE_SCHEMA_KEY, tableSchema.toString());
    + }
    +
    + return schemaTypes;
    + }
    +
    + /**
+ * Searches for column names in a given Parquet message schema and returns the
+ * projected Parquet schema types.
+ *
+ * @param schema Message type schema in which to search for column names.
+ * @param colNames List of column names.
+ * @param colTypes List of column types.
+ * @return A MessageType object of the projected columns.
    + */
    + private static MessageType getSchemaByName(MessageType schema, List<String> colNames, List<TypeInfo> colTypes) {
    + List<Type> projectedFields = getProjectedGroupFields(schema, colNames, colTypes);
    + Type[] typesArray = projectedFields.toArray(new Type[0]);
    +
    + return Types.buildMessage()
    + .addFields(typesArray)
    + .named(schema.getName());
    + }

    - final List<Integer> indexColumnsWanted = ColumnProjectionUtils.getReadColumnIDs(configuration);
    + /**
+ * Searches for column names by index in a given Parquet file schema and returns
+ * their corresponding Parquet schema types.
+ *
+ * @param schema Message schema in which to search for column names.
+ * @param colNames List of column names.
+ * @param colIndexes List of column indexes.
+ * @return A MessageType object of the column names found.
    + */
    + private static MessageType getSchemaByIndex(MessageType schema, List<String> colNames, List<Integer> colIndexes) {
    + List<Type> schemaTypes = new ArrayList<Type>();

    - final List<Type> typeListWanted = new ArrayList<Type>();
    + for (Integer i : colIndexes) {
    + if (i < colNames.size()) {
    + if (i < schema.getFieldCount()) {
    + schemaTypes.add(schema.getType(i));
    + } else {
    + //prefixing with '_mask_' to ensure no conflict with named
    + //columns in the file schema
    + schemaTypes.add(Types.optional(PrimitiveTypeName.BINARY).named("_mask_" + colNames.get(i)));
    + }
    + }
    + }

    - for (final Integer idx : indexColumnsWanted) {
    - if (idx < listColumns.size()) {
    - String col = listColumns.get(idx);
    - if (indexAccess) {
    - typeListWanted.add(fileSchema.getFields().get(idx));
    - } else {
    - col = col.toLowerCase();
    - if (lowerCaseFileSchemaColumns.containsKey(col)) {
    - typeListWanted.add(tableSchema.getType(lowerCaseFileSchemaColumns.get(col)));
    - }
    - }
    + return new MessageType(schema.getName(), schemaTypes);
    + }
    +
    + /**
+ * It creates the readContext for the Parquet side with the requested schema during the init phase.
    + *
    + * @param context
    + * @return the parquet ReadContext
    + */
    + @Override
    + public parquet.hadoop.api.ReadSupport.ReadContext init(InitContext context) {
    + Configuration configuration = context.getConfiguration();
    + MessageType fileSchema = context.getFileSchema();
    + String columnNames = configuration.get(IOConstants.COLUMNS);
    + Map<String, String> contextMetadata = new HashMap<String, String>();
    + boolean indexAccess = configuration.getBoolean(PARQUET_COLUMN_INDEX_ACCESS, false);
    +
    + if (columnNames != null) {
    + List<String> columnNamesList = getColumnNames(columnNames);
    +
    + MessageType tableSchema;
    + if (indexAccess) {
    + List<Integer> indexSequence = new ArrayList<Integer>();
    +
    + // Generates a sequence list of indexes
    + for(int i = 0; i < columnNamesList.size(); i++) {
    + indexSequence.add(i);
              }
    +
    + tableSchema = getSchemaByIndex(fileSchema, columnNamesList, indexSequence);
    + } else {
    + String columnTypes = configuration.get(IOConstants.COLUMNS_TYPES);
    + List<TypeInfo> columnTypesList = getColumnTypes(columnTypes);
    +
    + tableSchema = getSchemaByName(fileSchema, columnNamesList, columnTypesList);
            }
    - MessageType requestedSchemaByUser = new MessageType(fileSchema.getName(), typeListWanted);
    +
    + contextMetadata.put(HIVE_TABLE_AS_PARQUET_SCHEMA, tableSchema.toString());
    +
    + List<Integer> indexColumnsWanted = ColumnProjectionUtils.getReadColumnIDs(configuration);
    + MessageType requestedSchemaByUser = getSchemaByIndex(tableSchema, columnNamesList, indexColumnsWanted);
    +
            return new ReadContext(requestedSchemaByUser, contextMetadata);
          } else {
    - contextMetadata.put(HIVE_SCHEMA_KEY, fileSchema.toString());
    + contextMetadata.put(HIVE_TABLE_AS_PARQUET_SCHEMA, fileSchema.toString());
            return new ReadContext(fileSchema, contextMetadata);
          }
        }
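
The new init path projects the Hive columns onto the file schema by name, padding missing columns for schema evolution. A minimal sketch of that projection; the schema string and column list are purely illustrative:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    import parquet.schema.MessageType;
    import parquet.schema.MessageTypeParser;
    import parquet.schema.PrimitiveType.PrimitiveTypeName;
    import parquet.schema.Type;
    import parquet.schema.Types;

    public class ProjectionSketch {
      public static void main(String[] unused) {
        MessageType fileSchema = MessageTypeParser.parseMessageType(
            "message hive_schema { optional binary Name; optional int32 Age; }");
        List<String> colNames = Arrays.asList("name", "missing_col");

        List<Type> projected = new ArrayList<Type>();
        for (String col : colNames) {
          Type found = null;
          for (Type t : fileSchema.getFields()) { // case-insensitive, like getFieldTypeIgnoreCase
            if (t.getName().equalsIgnoreCase(col)) {
              found = t;
            }
          }
          if (found != null) {
            projected.add(found);
          } else { // pad missing columns for schema evolution
            projected.add(Types.optional(PrimitiveTypeName.BINARY).named(col));
          }
        }
        MessageType tableSchema = Types.buildMessage()
            .addFields(projected.toArray(new Type[0]))
            .named(fileSchema.getName());
        System.out.println(tableSchema);
      }
    }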

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/ParquetRecordReaderWrapper.java Wed Apr 1 01:15:50 2015
    @@ -15,7 +15,12 @@ package org.apache.hadoop.hive.ql.io.par

      import java.io.IOException;
      import java.util.ArrayList;
    +import java.util.Collections;
    +import java.util.HashMap;
    +import java.util.HashSet;
      import java.util.List;
    +import java.util.Map;
    +import java.util.Set;

      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
    @@ -42,6 +47,7 @@ import parquet.filter2.predicate.FilterP
      import parquet.hadoop.ParquetFileReader;
      import parquet.hadoop.ParquetInputFormat;
      import parquet.hadoop.ParquetInputSplit;
    +import parquet.hadoop.api.InitContext;
      import parquet.hadoop.api.ReadSupport.ReadContext;
      import parquet.hadoop.metadata.BlockMetaData;
      import parquet.hadoop.metadata.FileMetaData;
    @@ -243,10 +249,10 @@ public class ParquetRecordReaderWrapper
            final List<BlockMetaData> blocks = parquetMetadata.getBlocks();
            final FileMetaData fileMetaData = parquetMetadata.getFileMetaData();

    - final ReadContext readContext = new DataWritableReadSupport()
    - .init(jobConf, fileMetaData.getKeyValueMetaData(), fileMetaData.getSchema());
    + final ReadContext readContext = new DataWritableReadSupport().init(new InitContext(jobConf,
    + null, fileMetaData.getSchema()));
            schemaSize = MessageTypeParser.parseMessageType(readContext.getReadSupportMetadata()
    - .get(DataWritableReadSupport.HIVE_SCHEMA_KEY)).getFieldCount();
    + .get(DataWritableReadSupport.HIVE_TABLE_AS_PARQUET_SCHEMA)).getFieldCount();
            final List<BlockMetaData> splitGroup = new ArrayList<BlockMetaData>();
            final long splitStart = ((FileSplit) oldSplit).getStart();
            final long splitLength = ((FileSplit) oldSplit).getLength();

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java Wed Apr 1 01:15:50 2015
    @@ -645,30 +645,30 @@ public final class ColumnPrunerProcFacto
            // get the SEL(*) branch
            Operator<?> select = op.getChildOperators().get(LateralViewJoinOperator.SELECT_TAG);

    + // Update the info of SEL operator based on the pruned reordered columns
            // these are from ColumnPrunerSelectProc
            List<String> cols = cppCtx.getPrunedColList(select);
            RowSchema rs = op.getSchema();
    - if (rs.getSignature().size() != cols.size()) {
    - ArrayList<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
    - ArrayList<String> outputColNames = new ArrayList<String>();
    - for (String col : cols) {
    - // revert output cols of SEL(*) to ExprNodeColumnDesc
    - ColumnInfo colInfo = rs.getColumnInfo(col);
    - ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(colInfo);
    - colList.add(colExpr);
    - outputColNames.add(col);
    - }
    - // replace SEL(*) to SEL(exprs)
    - ((SelectDesc)select.getConf()).setSelStarNoCompute(false);
    - ((SelectDesc)select.getConf()).setColList(colList);
    - ((SelectDesc)select.getConf()).setOutputColumnNames(outputColNames);
    - pruneOperator(ctx, select, outputColNames);
    -
    - Operator<?> udtfPath = op.getChildOperators().get(LateralViewJoinOperator.UDTF_TAG);
    - List<String> lvFCols = new ArrayList<String>(cppCtx.getPrunedColLists().get(udtfPath));
    - lvFCols = Utilities.mergeUniqElems(lvFCols, outputColNames);
    - pruneOperator(ctx, op, lvFCols);
    + ArrayList<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
    + ArrayList<String> outputColNames = new ArrayList<String>();
    + for (String col : cols) {
    + // revert output cols of SEL(*) to ExprNodeColumnDesc
    + ColumnInfo colInfo = rs.getColumnInfo(col);
    + ExprNodeColumnDesc colExpr = new ExprNodeColumnDesc(colInfo);
    + colList.add(colExpr);
    + outputColNames.add(col);
            }
    + // replace SEL(*) to SEL(exprs)
    + ((SelectDesc)select.getConf()).setSelStarNoCompute(false);
    + ((SelectDesc)select.getConf()).setColList(colList);
    + ((SelectDesc)select.getConf()).setOutputColumnNames(outputColNames);
    + pruneOperator(ctx, select, outputColNames);
    +
    + Operator<?> udtfPath = op.getChildOperators().get(LateralViewJoinOperator.UDTF_TAG);
    + List<String> lvFCols = new ArrayList<String>(cppCtx.getPrunedColLists().get(udtfPath));
    + lvFCols = Utilities.mergeUniqElems(lvFCols, outputColNames);
    + pruneOperator(ctx, op, lvFCols);
    +
            return null;
          }
        }

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java Wed Apr 1 01:15:50 2015
    @@ -517,16 +517,17 @@ public final class ConstantPropagateProc
            if (PrimitiveObjectInspectorUtils.isPrimitiveWritableClass(clz)) {
              PrimitiveObjectInspector poi = (PrimitiveObjectInspector) oi;
              TypeInfo typeInfo = poi.getTypeInfo();
    -
    - // Handling parameterized types (varchar, decimal, etc).
    - if (typeInfo.getTypeName().contains(serdeConstants.DECIMAL_TYPE_NAME)
    - || typeInfo.getTypeName().contains(serdeConstants.VARCHAR_TYPE_NAME)
    + // Handling parameterized types (varchar etc).
    + if (typeInfo.getTypeName().contains(serdeConstants.VARCHAR_TYPE_NAME)
           || typeInfo.getTypeName().contains(serdeConstants.CHAR_TYPE_NAME)) {
                // Do not support parameterized types.
                return null;
              }
              o = poi.getPrimitiveJavaObject(o);
    + if (typeInfo.getTypeName().contains(serdeConstants.DECIMAL_TYPE_NAME)) {
    + return new ExprNodeConstantDesc(typeInfo, o);
    + }
            } else if (PrimitiveObjectInspectorUtils.isPrimitiveJavaClass(clz)) {

            } else {

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveCalciteUtil.java Wed Apr 1 01:15:50 2015
    @@ -32,8 +32,16 @@ import org.apache.calcite.rel.core.RelFa
      import org.apache.calcite.rel.core.Sort;
      import org.apache.calcite.rel.type.RelDataTypeField;
      import org.apache.calcite.rex.RexBuilder;
    +import org.apache.calcite.rex.RexCall;
    +import org.apache.calcite.rex.RexCorrelVariable;
    +import org.apache.calcite.rex.RexDynamicParam;
    +import org.apache.calcite.rex.RexFieldAccess;
      import org.apache.calcite.rex.RexInputRef;
    +import org.apache.calcite.rex.RexLiteral;
    +import org.apache.calcite.rex.RexLocalRef;
      import org.apache.calcite.rex.RexNode;
    +import org.apache.calcite.rex.RexOver;
    +import org.apache.calcite.rex.RexRangeRef;
      import org.apache.calcite.rex.RexVisitor;
      import org.apache.calcite.rex.RexVisitorImpl;
      import org.apache.calcite.sql.SqlKind;
    @@ -535,6 +543,7 @@ public class HiveCalciteUtil {
          boolean deterministic = true;

          RexVisitor<Void> visitor = new RexVisitorImpl<Void>(true) {
    + @Override
            public Void visitCall(org.apache.calcite.rex.RexCall call) {
              if (!call.getOperator().isDeterministic()) {
                throw new Util.FoundOne(call);
    @@ -551,4 +560,59 @@ public class HiveCalciteUtil {

          return deterministic;
        }
    +
    + /**
    + * Walks over an expression and determines whether it is constant.
    + */
    + public static class ConstantFinder implements RexVisitor<Boolean> {
    +
    + @Override
    + public Boolean visitLiteral(RexLiteral literal) {
    + return true;
    + }
    +
    + @Override
    + public Boolean visitInputRef(RexInputRef inputRef) {
    + return false;
    + }
    +
    + @Override
    + public Boolean visitLocalRef(RexLocalRef localRef) {
    + throw new RuntimeException("Not expected to be called.");
    + }
    +
    + @Override
    + public Boolean visitOver(RexOver over) {
    + return false;
    + }
    +
    + @Override
    + public Boolean visitCorrelVariable(RexCorrelVariable correlVariable) {
    + return false;
    + }
    +
    + @Override
    + public Boolean visitDynamicParam(RexDynamicParam dynamicParam) {
    + return false;
    + }
    +
    + @Override
    + public Boolean visitCall(RexCall call) {
    + // Constant if operator is deterministic and all operands are
    + // constant.
    + return call.getOperator().isDeterministic()
    + && RexVisitorImpl.visitArrayAnd(this, call.getOperands());
    + }
    +
    + @Override
    + public Boolean visitRangeRef(RexRangeRef rangeRef) {
    + return false;
    + }
    +
    + @Override
    + public Boolean visitFieldAccess(RexFieldAccess fieldAccess) {
    + // "<expr>.FIELD" is constant iff "<expr>" is constant.
    + return fieldAccess.getReferenceExpr().accept(this);
    + }
    + }
      }
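
ConstantFinder returns true only when an expression bottoms out in literals combined by deterministic operators. A minimal usage sketch, assuming calcite-core on the classpath; the class name and literal values are illustrative:

    import java.math.BigDecimal;

    import org.apache.calcite.jdbc.JavaTypeFactoryImpl;
    import org.apache.calcite.rex.RexBuilder;
    import org.apache.calcite.rex.RexNode;
    import org.apache.calcite.sql.fun.SqlStdOperatorTable;
    import org.apache.hadoop.hive.ql.optimizer.calcite.HiveCalciteUtil;

    public class ConstantFinderDemo {
      public static void main(String[] unused) {
        RexBuilder rexBuilder = new RexBuilder(new JavaTypeFactoryImpl());
        RexNode one = rexBuilder.makeExactLiteral(BigDecimal.ONE);
        // 1 + 1: a deterministic call over literals, hence constant
        RexNode sum = rexBuilder.makeCall(SqlStdOperatorTable.PLUS, one, one);
        System.out.println(sum.accept(new HiveCalciteUtil.ConstantFinder())); // true
      }
    }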

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/PlanModifierForASTConv.java Wed Apr 1 01:15:50 2015
    @@ -38,6 +38,9 @@ import org.apache.calcite.rel.core.Sort;
      import org.apache.calcite.rel.rules.MultiJoin;
      import org.apache.calcite.rel.type.RelDataType;
      import org.apache.calcite.rel.type.RelDataTypeFactory;
    +import org.apache.calcite.rex.RexBuilder;
    +import org.apache.calcite.rex.RexCall;
    +import org.apache.calcite.rex.RexInputRef;
      import org.apache.calcite.rex.RexNode;
      import org.apache.calcite.sql.SqlAggFunction;
      import org.apache.calcite.sql.SqlKind;
    @@ -168,7 +171,27 @@ public class PlanModifierForASTConv {
          ImmutableMap.Builder<Integer, RexNode> inputRefToCallMapBldr = ImmutableMap.builder();
          for (int i = resultSchema.size(); i < rt.getFieldCount(); i++) {
            if (collationInputRefs.contains(i)) {
    - inputRefToCallMapBldr.put(i, obChild.getChildExps().get(i));
    + RexNode obyExpr = obChild.getChildExps().get(i);
    + if (obyExpr instanceof RexCall) {
    + int a = -1;
    + List<RexNode> operands = new ArrayList<>();
+ for (int k = 0; k < ((RexCall) obyExpr).operands.size(); k++) {
    + RexNode rn = ((RexCall) obyExpr).operands.get(k);
    + for (int j = 0; j < resultSchema.size(); j++) {
    + if( obChild.getChildExps().get(j).toString().equals(rn.toString())) {
    + a = j;
    + break;
    + }
+ }
+ if (a != -1) {
    + operands.add(new RexInputRef(a, rn.getType()));
    + } else {
    + operands.add(rn);
    + }
    + a = -1;
    + }
    + obyExpr = obChild.getCluster().getRexBuilder().makeCall(((RexCall)obyExpr).getOperator(), operands);
    + }
    + inputRefToCallMapBldr.put(i, obyExpr);
            }
          }
          ImmutableMap<Integer, RexNode> inputRefToCallMap = inputRefToCallMapBldr.build();
    @@ -266,7 +289,7 @@ public class PlanModifierForASTConv {
          RelNode select = introduceDerivedTable(rel);

          parent.replaceInput(pos, select);
    -
    +
          return select;
        }

    @@ -352,7 +375,7 @@ public class PlanModifierForASTConv {

          return validChild;
        }
    -
    +
        private static boolean isEmptyGrpAggr(RelNode gbNode) {
          // Verify if both groupset and aggrfunction are empty)
          Aggregate aggrnode = (Aggregate) gbNode;
    @@ -361,12 +384,12 @@ public class PlanModifierForASTConv {
          }
          return false;
        }
    -
    +
        private static void replaceEmptyGroupAggr(final RelNode rel, RelNode parent) {
          // If this function is called, the parent should only include constant
          List<RexNode> exps = parent.getChildExps();
          for (RexNode rexNode : exps) {
    - if (rexNode.getKind() != SqlKind.LITERAL) {
    + if (!rexNode.accept(new HiveCalciteUtil.ConstantFinder())) {
              throw new RuntimeException("We expect " + parent.toString()
                  + " to contain only constants. However, " + rexNode.toString() + " is "
                  + rexNode.getKind());
    @@ -377,7 +400,7 @@ public class PlanModifierForASTConv {
          RelDataType longType = TypeConverter.convert(TypeInfoFactory.longTypeInfo, typeFactory);
          RelDataType intType = TypeConverter.convert(TypeInfoFactory.intTypeInfo, typeFactory);
          // Create the dummy aggregation.
    - SqlAggFunction countFn = (SqlAggFunction) SqlFunctionConverter.getCalciteAggFn("count",
    + SqlAggFunction countFn = SqlFunctionConverter.getCalciteAggFn("count",
              ImmutableList.of(intType), longType);
          // TODO: Using 0 might be wrong; might need to walk down to find the
          // proper index of a dummy.

    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java Wed Apr 1 01:15:50 2015
    @@ -69,6 +69,7 @@ import org.apache.calcite.rel.rules.Filt
      import org.apache.calcite.rel.rules.JoinPushTransitivePredicatesRule;
      import org.apache.calcite.rel.rules.JoinToMultiJoinRule;
      import org.apache.calcite.rel.rules.LoptOptimizeJoinRule;
    +import org.apache.calcite.rel.rules.ProjectMergeRule;
      import org.apache.calcite.rel.rules.ProjectRemoveRule;
      import org.apache.calcite.rel.rules.ReduceExpressionsRule;
      import org.apache.calcite.rel.rules.SemiJoinFilterTransposeRule;
    @@ -721,6 +722,7 @@ public class CalcitePlanner extends Sema
            hepPgmBldr.addRuleInstance(ReduceExpressionsRule.PROJECT_INSTANCE);
            hepPgmBldr.addRuleInstance(ProjectRemoveRule.INSTANCE);
            hepPgmBldr.addRuleInstance(UnionMergeRule.INSTANCE);
    + hepPgmBldr.addRuleInstance(new ProjectMergeRule(false, HiveProject.DEFAULT_PROJECT_FACTORY));

            hepPgm = hepPgmBldr.build();
            HepPlanner hepPlanner = new HepPlanner(hepPgm);
    @@ -785,8 +787,6 @@ public class CalcitePlanner extends Sema
            // 3. Transitive inference & Partition Pruning
            basePlan = hepPlan(basePlan, false, mdProvider, new JoinPushTransitivePredicatesRule(
                Join.class, HiveFilter.DEFAULT_FILTER_FACTORY),
    - // TODO: Enable it after CALCITE-407 is fixed
    - // RemoveTrivialProjectRule.INSTANCE,
                new HivePartitionPruneRule(conf));

            // 4. Projection Pruning
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join32_lessSize.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join32_lessSize.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join32_lessSize.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join32_lessSize.q.out Wed Apr 1 01:15:50 2015
    @@ -119,11 +119,11 @@ STAGE PLANS:
        Stage: Stage-8
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -279,16 +279,16 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:z]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]

        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                TableScan
                  alias: y
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -666,11 +666,11 @@ STAGE PLANS:
        Stage: Stage-11
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_2:$hdt$_2:x
    + $hdt$_1:$hdt$_2:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_2:$hdt$_2:x
    + $hdt$_1:$hdt$_2:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -780,16 +780,16 @@ STAGE PLANS:
                    name: default.src1
                  name: default.src1
            Truncated Path -> Alias:
    - /src1 [$hdt$_0:$hdt$_1:$hdt$_2:$hdt$_3:x]
    + /src1 [$hdt$_1:$hdt$_2:$hdt$_3:x]

        Stage: Stage-10
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_1:w
    + $hdt$_1:$hdt$_1:w
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_1:w
    + $hdt$_1:$hdt$_1:w
                TableScan
                  alias: w
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -914,11 +914,11 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:w
    + $hdt$_0:w
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:w
    + $hdt$_0:w
                TableScan
                  alias: w
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -1319,11 +1319,11 @@ STAGE PLANS:
        Stage: Stage-8
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1481,12 +1481,12 @@ STAGE PLANS:
                    name: default.src1
                  name: default.src1
            Truncated Path -> Alias:
    - /src [$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:y]
    + /src [$hdt$_1:$hdt$_1:y]

        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                Fetch Operator
                  limit: -1
                  Partition Description:
    @@ -1536,7 +1536,7 @@ STAGE PLANS:
                          name: default.srcpart
                        name: default.srcpart
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                TableScan
                  alias: z
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -1931,11 +1931,11 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:y
    + $hdt$_0:$hdt$_0:y
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:y
    + $hdt$_0:$hdt$_0:y
                TableScan
                  alias: y
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -2089,7 +2089,7 @@ STAGE PLANS:
                    name: default.src1
                  name: default.src1
            Truncated Path -> Alias:
    - /src1 [$hdt$_0:$hdt$_0:$hdt$_1:x]
    + /src1 [$hdt$_0:$hdt$_1:x]

        Stage: Stage-8
          Map Reduce Local Work
    @@ -2469,11 +2469,11 @@ STAGE PLANS:
        Stage: Stage-8
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -2526,11 +2526,11 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:x
    + $hdt$_0:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:x
    + $hdt$_0:x
                TableScan
                  alias: x
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -2726,11 +2726,11 @@ STAGE PLANS:
        Stage: Stage-8
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -2783,11 +2783,11 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                TableScan
                  alias: y
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join33.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join33.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join33.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join33.q.out Wed Apr 1 01:15:50 2015
    @@ -109,14 +109,14 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                TableScan
                  alias: y
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -134,7 +134,7 @@ STAGE PLANS:
                          0 _col0 (type: string)
                          1 _col3 (type: string)
                        Position of Big Table: 1
    - $hdt$_0:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -356,7 +356,7 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:z]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join35.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join35.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join35.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join35.q.out Wed Apr 1 01:15:50 2015
    @@ -232,7 +232,7 @@ STAGE PLANS:
                    name: default.src
                  name: default.src
            Truncated Path -> Alias:
    - /src [$hdt$_0-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:x]
    + /src [$hdt$_0-subquery1:$hdt$_0-subquery1:$hdt$_0:x]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -569,7 +569,7 @@ STAGE PLANS:
                    name: default.src
                  name: default.src
            Truncated Path -> Alias:
    - /src [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:x]
    + /src [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:x]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/leadlag.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/leadlag.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/leadlag.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/leadlag.q.out Wed Apr 1 01:15:50 2015
    @@ -223,6 +223,8 @@ Manufacturer#5 almond antique sky peru o
      Manufacturer#5 almond aquamarine dodger light gainsboro 46 17
      Manufacturer#5 almond azure blanched chiffon midnight 23 21
      PREHOOK: query: -- 6. testRankInLead
    +-- disable cbo because of CALCITE-653
    +
      select p_mfgr, p_name, p_size, r1,
      lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank
      from (
    @@ -234,6 +236,8 @@ PREHOOK: type: QUERY
      PREHOOK: Input: default@part
      #### A masked pattern was here ####
      POSTHOOK: query: -- 6. testRankInLead
    +-- disable cbo because of CALCITE-653
    +
      select p_mfgr, p_name, p_size, r1,
      lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank
      from (

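For context on the leadlag hunk: lead(col, offset, default) returns the value of col from the row offset positions ahead within the window, falling back to default past the partition tail, which is why lead(r1, 1, r1) yields the row's own r1 on each partition's last row instead of NULL. A minimal sketch of the construct the test exercises (not part of this commit; ranked_parts stands in for the test's ranking subquery):

    SELECT p_mfgr, p_name, r1,
           lead(r1, 1, r1) OVER (DISTRIBUTE BY p_mfgr SORT BY p_name) AS deltaRank
    FROM ranked_parts;
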
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_1.q.out Wed Apr 1 01:15:50 2015
    @@ -207,8 +207,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator
    @@ -523,7 +523,7 @@ STAGE PLANS:
                    name: default.list_bucketing_dynamic_part
                  name: default.list_bucketing_dynamic_part
            Truncated Path -> Alias:
    - /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484 [$hdt$_0:list_bucketing_dynamic_part]
    + /list_bucketing_dynamic_part/ds=2008-04-08/hr=11/key=484 [list_bucketing_dynamic_part]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_11.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -395,7 +395,7 @@ STAGE PLANS:
                    name: default.list_bucketing_static_part
                  name: default.list_bucketing_static_part
            Truncated Path -> Alias:
    - /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466 [$hdt$_0:list_bucketing_static_part]
    + /list_bucketing_static_part/ds=2008-04-08/hr=11/value=val_466 [list_bucketing_static_part]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_12.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -166,7 +166,7 @@ STAGE PLANS:
                    name: default.src
                  name: default.src
            Truncated Path -> Alias:
    - /src [$hdt$_0:src]
    + /src [src]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_13.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -166,7 +166,7 @@ STAGE PLANS:
                    name: default.src
                  name: default.src
            Truncated Path -> Alias:
    - /src [$hdt$_0:src]
    + /src [src]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_2.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -231,8 +231,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_3.q.out Wed Apr 1 01:15:50 2015
    @@ -199,8 +199,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator
    @@ -463,7 +463,7 @@ STAGE PLANS:
                    name: default.list_bucketing_static_part
                  name: default.list_bucketing_static_part
            Truncated Path -> Alias:
    - /list_bucketing_static_part/ds=2008-04-08/hr=11/key=484 [$hdt$_0:list_bucketing_static_part]
    + /list_bucketing_static_part/ds=2008-04-08/hr=11/key=484 [list_bucketing_static_part]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_4.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -239,8 +239,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator
    @@ -534,8 +534,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-7
          Conditional Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_5.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -211,8 +211,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_6.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -295,8 +295,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator
    @@ -650,8 +650,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-7
          Conditional Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_7.q.out Wed Apr 1 01:15:50 2015
    @@ -241,8 +241,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator
    @@ -596,8 +596,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-7
          Conditional Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_8.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -297,8 +297,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_dml_9.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -239,8 +239,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Move Operator
    @@ -534,8 +534,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-7
          Conditional Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_1.q.out Wed Apr 1 01:15:50 2015
    @@ -246,7 +246,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/key=484/value=val_484 [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -406,7 +406,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/key=238/value=val_238 [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/key=238/value=val_238 [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -559,7 +559,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -713,7 +713,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_2.q.out Wed Apr 1 01:15:50 2015
    @@ -290,8 +290,8 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    - /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
    + /fact_daily/ds=1/hr=4/key=484/value=val_484 [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -443,7 +443,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -662,8 +662,8 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=4/key=238/value=val_238 [$hdt$_0:fact_daily]
    - /fact_daily/ds=1/hr=4/key=484/value=val_484 [$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=4/key=238/value=val_238 [fact_daily]
    + /fact_daily/ds=1/hr=4/key=484/value=val_484 [fact_daily]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_multiskew_3.q.out Wed Apr 1 01:15:50 2015
    @@ -489,7 +489,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_daily/ds=1/hr=1 [$hdt$_0:$hdt$_0:fact_daily]
    + /fact_daily/ds=1/hr=1 [$hdt$_0:fact_daily]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_1.q.out Wed Apr 1 01:15:50 2015
    @@ -299,7 +299,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
    + /fact_tz/ds=1/x=484 [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -446,7 +446,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    + /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -589,7 +589,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    + /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_2.q.out Wed Apr 1 01:15:50 2015
    @@ -316,7 +316,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
    + /fact_tz/ds=1/x=484 [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -483,7 +483,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
    + /fact_tz/ds=1/x=484 [fact_daily]

        Stage: Stage-0
          Fetch Operator
    @@ -630,7 +630,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/x=484 [$hdt$_0:$hdt$_0:fact_daily]
    + /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -822,7 +822,7 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/x=484 [$hdt$_0:$hdt$_0:fact_daily]
    + /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/list_bucket_query_oneskew_3.q.out Wed Apr 1 01:15:50 2015
    @@ -419,9 +419,9 @@ STAGE PLANS:
                    name: default.fact_daily
                  name: default.fact_daily
            Truncated Path -> Alias:
    - /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [$hdt$_0:fact_daily]
    - /fact_tz/ds=1/x=238 [$hdt$_0:fact_daily]
    - /fact_tz/ds=1/x=484 [$hdt$_0:fact_daily]
    + /fact_tz/ds=1/HIVE_DEFAULT_LIST_BUCKETING_DIR_NAME [fact_daily]
    + /fact_tz/ds=1/x=238 [fact_daily]
    + /fact_tz/ds=1/x=484 [fact_daily]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/literal_decimal.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/literal_decimal.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/literal_decimal.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/literal_decimal.q.out Wed Apr 1 01:15:50 2015
    @@ -14,7 +14,7 @@ STAGE PLANS:
                alias: src
                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
    - expressions: (- 1) (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 (type: decimal(1,0)), 3.14 (type: decimal(3,2)), (- 3.14) (type: decimal(3,2)), 99999999999999999 (type: decimal(17,0)), 99999999999999999.9999999999999 (type: decimal(30,13)), 1E99 (type: decimal(1,0))
    + expressions: -1 (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 (type: decimal(1,0)), 3.14 (type: decimal(3,2)), -3.14 (type: decimal(3,2)), 99999999999999999 (type: decimal(17,0)), 99999999999999999.9999999999999 (type: decimal(30,13)), 1E99 (type: decimal(1,0))
                  outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                  Statistics: Num rows: 500 Data size: 448000 Basic stats: COMPLETE Column stats: COMPLETE
                  Limit

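In the literal_decimal hunk, negative decimal constants are now printed as plain literals (-1, -3.14) rather than unary-minus expressions over positive literals ((- 1), (- 3.14)); the inferred decimal types are unchanged. A hedged sketch of the kind of select list behind this golden file (BD is HiveQL's decimal-literal suffix; the test's exact query may differ):

    SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD,
           99999999999999999BD
    FROM src LIMIT 1;
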
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/load_dyn_part14.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/load_dyn_part14.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/load_dyn_part14.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/load_dyn_part14.q.out Wed Apr 1 01:15:50 2015
    @@ -76,22 +76,22 @@ STAGE PLANS:
                  Select Operator
                    expressions: 'k1' (type: string), UDFToString(null) (type: string)
                    outputColumnNames: _col0, _col1
    - Statistics: Num rows: 500 Data size: 135000 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 500 Data size: 85000 Basic stats: COMPLETE Column stats: COMPLETE
                    Limit
                      Number of rows: 2
    - Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                      Reduce Output Operator
                        sort order:
    - Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                        value expressions: _col0 (type: string), _col1 (type: string)
            Reduce Operator Tree:
              Select Operator
                expressions: VALUE._col0 (type: string), VALUE._col1 (type: string)
                outputColumnNames: _col0, _col1
    - Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                Limit
                  Number of rows: 2
    - Statistics: Num rows: 2 Data size: 540 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 340 Basic stats: COMPLETE Column stats: COMPLETE
                  File Output Operator
                    compressed: false
                    table:
    @@ -104,10 +104,10 @@ STAGE PLANS:
            Map Operator Tree:
                TableScan
                  Union
    - Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
    - Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -115,10 +115,10 @@ STAGE PLANS:
                          name: default.nzhang_part14
                TableScan
                  Union
    - Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
    - Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -126,10 +126,10 @@ STAGE PLANS:
                          name: default.nzhang_part14
                TableScan
                  Union
    - Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
    - Statistics: Num rows: 6 Data size: 1222 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 6 Data size: 1022 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

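Only statistics change in the load_dyn_part14 plan: the estimated width of the UDFToString(null) column dropped, so Data size shrinks from 540 to 340 bytes for the two-row Limit and from 1222 to 1022 in the downstream Union, while operator shape and row counts stay the same. A fragment of roughly the shape feeding those numbers (illustrative; per the plan above, the real test unions several such branches into an insert over nzhang_part14):

    EXPLAIN
    SELECT 'k1' AS key, CAST(NULL AS STRING) AS value
    FROM src LIMIT 2;
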
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/louter_join_ppr.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/louter_join_ppr.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/louter_join_ppr.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/louter_join_ppr.q.out Wed Apr 1 01:15:50 2015
    @@ -1149,9 +1149,9 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /src [$hdt$_0:$hdt$_1:$hdt$_1:a]
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:$hdt$_0:b]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:$hdt$_0:b]
    + /src [$hdt$_1:$hdt$_1:a]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_0:b]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_0:b]
            Needs Tagging: true
            Reduce Operator Tree:
              Join Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_mapjoin.q.out Wed Apr 1 01:15:50 2015
    @@ -64,14 +64,14 @@ STAGE PLANS:
        Stage: Stage-6
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:src
    + $hdt$_0:src
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_2:src1
    + $hdt$_1:$hdt$_2:src1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:src
    + $hdt$_0:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -89,7 +89,7 @@ STAGE PLANS:
                          0 _col0 (type: string)
                          1 _col1 (type: string)
                        Position of Big Table: 1
    - $hdt$_0:$hdt$_1:$hdt$_2:src1
    + $hdt$_1:$hdt$_2:src1
                TableScan
                  alias: src1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -444,10 +444,10 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
    - /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
    - /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:$hdt$_1:$hdt$_1:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [$hdt$_1:$hdt$_1:srcpart]
    + /srcpart/ds=2008-04-09/hr=11 [$hdt$_1:$hdt$_1:srcpart]
    + /srcpart/ds=2008-04-09/hr=12 [$hdt$_1:$hdt$_1:srcpart]

        Stage: Stage-0
          Fetch Operator
    @@ -470,14 +470,14 @@ STAGE PLANS:
        Stage: Stage-6
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:src
    + $hdt$_0:src
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_2:src1
    + $hdt$_1:$hdt$_2:src1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:src
    + $hdt$_0:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -492,7 +492,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col1 (type: string)
    - $hdt$_0:$hdt$_1:$hdt$_2:src1
    + $hdt$_1:$hdt$_2:src1
                TableScan
                  alias: src1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_subquery.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_subquery.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_subquery.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/mapjoin_subquery.q.out Wed Apr 1 01:15:50 2015
    @@ -27,14 +27,14 @@ STAGE PLANS:
        Stage: Stage-6
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                TableScan
                  alias: z
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -49,7 +49,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -270,14 +270,14 @@ STAGE PLANS:
        Stage: Stage-6
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                TableScan
                  alias: z
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -292,7 +292,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/multiMapJoin2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/multiMapJoin2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/multiMapJoin2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/multiMapJoin2.q.out Wed Apr 1 01:15:50 2015
    @@ -761,11 +761,11 @@ STAGE PLANS:
        Stage: Stage-17
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
                TableScan
                  alias: y1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -833,11 +833,11 @@ STAGE PLANS:
        Stage: Stage-15
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$INTNAME1
    + $hdt$_0:$INTNAME1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$INTNAME1
    + $hdt$_0:$INTNAME1
                TableScan
                  HashTable Sink Operator
                    keys:
    @@ -917,11 +917,11 @@ STAGE PLANS:
        Stage: Stage-16
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$INTNAME
    + $hdt$_0:$INTNAME
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$INTNAME
    + $hdt$_0:$INTNAME
                TableScan
                  HashTable Sink Operator
                    keys:
    @@ -993,11 +993,11 @@ STAGE PLANS:
        Stage: Stage-18
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
                TableScan
                  alias: y1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1159,14 +1159,14 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:y1
                TableScan
                  alias: y1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1181,7 +1181,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:y1
                TableScan
                  alias: y1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1488,11 +1488,11 @@ STAGE PLANS:
        Stage: Stage-12
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$INTNAME1
    + $hdt$_0:$INTNAME1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$INTNAME1
    + $hdt$_0:$INTNAME1
                TableScan
                  HashTable Sink Operator
                    keys:
    @@ -1575,11 +1575,11 @@ STAGE PLANS:
        Stage: Stage-13
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$INTNAME
    + $hdt$_0:$INTNAME
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$INTNAME
    + $hdt$_0:$INTNAME
                TableScan
                  HashTable Sink Operator
                    keys:
    @@ -1658,11 +1658,11 @@ STAGE PLANS:
        Stage: Stage-14
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
                TableScan
                  alias: x1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -1823,11 +1823,11 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
    + $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:x1
                TableScan
                  alias: x1
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -2218,11 +2218,11 @@ STAGE PLANS:
        Stage: Stage-15
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
    + null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:$hdt$_1:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
    + null-subquery1:$hdt$_0-subquery1:$hdt$_1:$hdt$_1:$hdt$_1:a
                TableScan
                  alias: a
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -2287,14 +2287,14 @@ STAGE PLANS:
        Stage: Stage-14
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:a
    + null-subquery1:$hdt$_0-subquery1:$hdt$_0:a
                Fetch Operator
                  limit: -1
    - null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:a
    + null-subquery2:$hdt$_0-subquery2:$hdt$_0:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:a
    + null-subquery1:$hdt$_0-subquery1:$hdt$_0:a
                TableScan
                  alias: a
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -2309,7 +2309,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:a
    + null-subquery2:$hdt$_0-subquery2:$hdt$_0:a
                TableScan
                  alias: a
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -2370,11 +2370,11 @@ STAGE PLANS:
        Stage: Stage-16
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
    + null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:$hdt$_1:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:a
    + null-subquery2:$hdt$_0-subquery2:$hdt$_1:$hdt$_1:$hdt$_1:a
                TableScan
                  alias: a
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/nonblock_op_deduplicate.q.out Wed Apr 1 01:15:50 2015
    @@ -20,20 +20,16 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
    - expressions: (UDFToDouble(key) + 1.0) (type: double)
    - outputColumnNames: _col0
    + expressions: (UDFToDouble(key) + 1.0) (type: double), ((UDFToDouble(key) + 1.0) + 1.0) (type: double)
    + outputColumnNames: _col0, _col1
                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: _col0 (type: double), (_col0 + 1.0) (type: double)
    - outputColumnNames: _col0, _col1
    + File Output Operator
    + compressed: false
                      Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-0
          Fetch Operator

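The nonblock_op_deduplicate hunk shows two stacked Select Operators collapsed into one: the inner projection (UDFToDouble(key) + 1.0) and the outer (_col0 + 1.0) are now evaluated in a single operator feeding the File Output Operator directly. A query of roughly this shape yields such a plan (a sketch, not necessarily the test's exact query):

    EXPLAIN
    SELECT a.k, a.k + 1.0
    FROM (SELECT key + 1.0 AS k FROM src) a;
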
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/parquet_columnar.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/parquet_columnar.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/parquet_columnar.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/parquet_columnar.q.out Wed Apr 1 01:15:50 2015
    @@ -11,8 +11,8 @@ PREHOOK: type: DROPTABLE
      POSTHOOK: query: DROP TABLE IF EXISTS parquet_columnar_renamed
      POSTHOOK: type: DROPTABLE
      PREHOOK: query: CREATE TABLE parquet_columnar_access_stage (
    - s string,
    - i int,
    + s string,
    + i int,
          f float
        ) ROW FORMAT DELIMITED
        FIELDS TERMINATED BY '|'
    @@ -20,8 +20,8 @@ PREHOOK: type: CREATETABLE
      PREHOOK: Output: database:default
      PREHOOK: Output: default@parquet_columnar_access_stage
      POSTHOOK: query: CREATE TABLE parquet_columnar_access_stage (
    - s string,
    - i int,
    + s string,
    + i int,
          f float
        ) ROW FORMAT DELIMITED
        FIELDS TERMINATED BY '|'
    @@ -32,7 +32,8 @@ PREHOOK: query: CREATE TABLE parquet_col
          s string,
          x int,
          y int,
    - f float
    + f float,
    + address struct<intVals:int,strVals:string>
        ) STORED AS PARQUET
      PREHOOK: type: CREATETABLE
      PREHOOK: Output: database:default
    @@ -41,7 +42,8 @@ POSTHOOK: query: CREATE TABLE parquet_co
          s string,
          x int,
          y int,
    - f float
    + f float,
    + address struct<intVals:int,strVals:string>
        ) STORED AS PARQUET
      POSTHOOK: type: CREATETABLE
      POSTHOOK: Output: database:default
    @@ -54,14 +56,17 @@ POSTHOOK: query: LOAD DATA LOCAL INPATH
      POSTHOOK: type: LOAD
      #### A masked pattern was here ####
      POSTHOOK: Output: default@parquet_columnar_access_stage
    -PREHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f FROM parquet_columnar_access_stage
    +PREHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f, named_struct('intVals',
    +i,'strVals',s) FROM parquet_columnar_access_stage
      PREHOOK: type: QUERY
      PREHOOK: Input: default@parquet_columnar_access_stage
      PREHOOK: Output: default@parquet_columnar_access
    -POSTHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f FROM parquet_columnar_access_stage
    +POSTHOOK: query: INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f, named_struct('intVals',
    +i,'strVals',s) FROM parquet_columnar_access_stage
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@parquet_columnar_access_stage
      POSTHOOK: Output: default@parquet_columnar_access
    +POSTHOOK: Lineage: parquet_columnar_access.address EXPRESSION [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:i, type:int, comment:null), (parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:s, type:string, comment:null), ]
      POSTHOOK: Lineage: parquet_columnar_access.f SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:f, type:float, comment:null), ]
      POSTHOOK: Lineage: parquet_columnar_access.s SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:s, type:string, comment:null), ]
      POSTHOOK: Lineage: parquet_columnar_access.x SIMPLE [(parquet_columnar_access_stage)parquet_columnar_access_stage.FieldSchema(name:i, type:int, comment:null), ]
    @@ -74,27 +79,27 @@ POSTHOOK: query: SELECT * FROM parquet_c
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@parquet_columnar_access
      #### A masked pattern was here ####
    -1abc00 1 2 1.0
    -1def01 2 3 1.1
    -1ghi02 3 4 1.2
    -1jkl03 1 2 1.3
    -1mno04 2 3 1.4
    -1pqr05 3 4 1.0
    -1stu06 1 2 1.1
    -1vwx07 2 3 1.2
    -1yza08 3 4 1.3
    -1bcd09 1 2 1.4
    -1efg10 2 3 1.0
    -1hij11 3 4 1.1
    -1klm12 1 2 1.2
    -1nop13 2 3 1.3
    -1qrs14 3 4 1.4
    -1tuv15 1 2 1.0
    -1wxy16 2 3 1.1
    -1zab17 3 4 1.2
    -1cde18 1 2 1.3
    -1fgh19 2 3 1.4
    -1ijk20 3 4 1.0
    +1abc00 1 2 1.0 {"intVals":1,"strVals":"1abc00"}
    +1def01 2 3 1.1 {"intVals":2,"strVals":"1def01"}
    +1ghi02 3 4 1.2 {"intVals":3,"strVals":"1ghi02"}
    +1jkl03 1 2 1.3 {"intVals":1,"strVals":"1jkl03"}
    +1mno04 2 3 1.4 {"intVals":2,"strVals":"1mno04"}
    +1pqr05 3 4 1.0 {"intVals":3,"strVals":"1pqr05"}
    +1stu06 1 2 1.1 {"intVals":1,"strVals":"1stu06"}
    +1vwx07 2 3 1.2 {"intVals":2,"strVals":"1vwx07"}
    +1yza08 3 4 1.3 {"intVals":3,"strVals":"1yza08"}
    +1bcd09 1 2 1.4 {"intVals":1,"strVals":"1bcd09"}
    +1efg10 2 3 1.0 {"intVals":2,"strVals":"1efg10"}
    +1hij11 3 4 1.1 {"intVals":3,"strVals":"1hij11"}
    +1klm12 1 2 1.2 {"intVals":1,"strVals":"1klm12"}
    +1nop13 2 3 1.3 {"intVals":2,"strVals":"1nop13"}
    +1qrs14 3 4 1.4 {"intVals":3,"strVals":"1qrs14"}
    +1tuv15 1 2 1.0 {"intVals":1,"strVals":"1tuv15"}
    +1wxy16 2 3 1.1 {"intVals":2,"strVals":"1wxy16"}
    +1zab17 3 4 1.2 {"intVals":3,"strVals":"1zab17"}
    +1cde18 1 2 1.3 {"intVals":1,"strVals":"1cde18"}
    +1fgh19 2 3 1.4 {"intVals":2,"strVals":"1fgh19"}
    +1ijk20 3 4 1.0 {"intVals":3,"strVals":"1ijk20"}
      PREHOOK: query: ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, x1 int, y1 int, f1 float)
      PREHOOK: type: ALTERTABLE_REPLACECOLS
      PREHOOK: Input: default@parquet_columnar_access

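The parquet_columnar update adds a struct<intVals:int,strVals:string> column, populates it with named_struct, and verifies the round trip in the SELECT * output above. Struct fields are addressed with dot notation in HiveQL; a minimal follow-up query against the table from this diff (valid before the REPLACE COLUMNS step shown above drops the column):

    SELECT address.intVals, address.strVals
    FROM parquet_columnar_access
    LIMIT 3;
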
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/pcr.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/pcr.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/pcr.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/pcr.q.out Wed Apr 1 01:15:50 2015
    @@ -475,9 +475,9 @@ STAGE PLANS:
                    name: default.pcr_t1
                  name: default.pcr_t1
            Truncated Path -> Alias:
    - /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
    - /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
    - /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1]
    + /pcr_t1/ds=2000-04-08 [pcr_t1]
    + /pcr_t1/ds=2000-04-09 [pcr_t1]
    + /pcr_t1/ds=2000-04-10 [pcr_t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Select Operator
    @@ -1798,8 +1798,8 @@ STAGE PLANS:
                    name: default.pcr_t1
                  name: default.pcr_t1
            Truncated Path -> Alias:
    - /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
    - /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
    + /pcr_t1/ds=2000-04-08 [pcr_t1]
    + /pcr_t1/ds=2000-04-09 [pcr_t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Select Operator
    @@ -2003,8 +2003,8 @@ STAGE PLANS:
                    name: default.pcr_t1
                  name: default.pcr_t1
            Truncated Path -> Alias:
    - /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
    - /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
    + /pcr_t1/ds=2000-04-08 [pcr_t1]
    + /pcr_t1/ds=2000-04-09 [pcr_t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Select Operator
    @@ -2293,9 +2293,9 @@ STAGE PLANS:
                    name: default.pcr_t1
                  name: default.pcr_t1
            Truncated Path -> Alias:
    - /pcr_t1/ds=2000-04-08 [$hdt$_0:pcr_t1]
    - /pcr_t1/ds=2000-04-09 [$hdt$_0:pcr_t1]
    - /pcr_t1/ds=2000-04-10 [$hdt$_0:pcr_t1]
    + /pcr_t1/ds=2000-04-08 [pcr_t1]
    + /pcr_t1/ds=2000-04-09 [pcr_t1]
    + /pcr_t1/ds=2000-04-10 [pcr_t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Select Operator
    @@ -5325,7 +5325,7 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
            Needs Tagging: false
            Reduce Operator Tree:
              Select Operator
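
This hunk, together with the ppd_vc and ppr_allchildsarenull hunks below, shows the same mechanical change: the Truncated Path -> Alias entries carry one fewer level of the compiler's internal $hdt$_N derived-table prefix (here the prefix disappears entirely, leaving the bare table alias). These prefixes surface in EXPLAIN EXTENDED output for partitioned scans; a hypothetical query of that shape, not quoted from pcr.q:

    -- With three matching partitions, EXPLAIN EXTENDED lists one
    -- Truncated Path -> Alias entry per partition directory.
    EXPLAIN EXTENDED
    SELECT key, value FROM pcr_t1 WHERE ds <= '2000-04-10' ORDER BY key;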

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/ppd_vc.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/ppd_vc.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/ppd_vc.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/ppd_vc.q.out Wed Apr 1 01:15:50 2015
    @@ -258,10 +258,10 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]
    + /srcpart/ds=2008-04-09/hr=11 [srcpart]
    + /srcpart/ds=2008-04-09/hr=12 [srcpart]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/ppr_allchildsarenull.q.out Wed Apr 1 01:15:50 2015
    @@ -197,8 +197,8 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-0
          Fetch Operator
    @@ -521,10 +521,10 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-09/hr=11 [$hdt$_0:srcpart]
    - /srcpart/ds=2008-04-09/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=11 [srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]
    + /srcpart/ds=2008-04-09/hr=11 [srcpart]
    + /srcpart/ds=2008-04-09/hr=12 [srcpart]

        Stage: Stage-0
          Fetch Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_1.q.out Wed Apr 1 01:15:50 2015
    @@ -572,7 +572,7 @@ STAGE PLANS:
          Map Reduce
            Map Operator Tree:
                TableScan
    - alias: $hdt$_0:$hdt$_0:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__
    + alias: $hdt$_0:default.default__lineitem_ix_lineitem_ix_lshipdate_idx__
                  Statistics: Num rows: 95 Data size: 8960 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: year(l_shipdate) (type: int), month(l_shipdate) (type: int), l_shipdate (type: string), _count_of_l_shipdate (type: bigint)
    @@ -2260,7 +2260,7 @@ STAGE PLANS:
          Map Reduce
            Map Operator Tree:
                TableScan
    - alias: $hdt$_0:$hdt$_0:default.default__tblpart_tbl_part_index__
    + alias: $hdt$_0:default.default__tblpart_tbl_part_index__
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
                    predicate: (key < 10) (type: boolean)

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/ql_rewrite_gbtoidx_cbo_2.q.out Wed Apr 1 01:15:50 2015
    @@ -3192,7 +3192,7 @@ STAGE PLANS:
          Map Reduce
            Map Operator Tree:
                TableScan
    - alias: $hdt$_0:$hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__
    + alias: $hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__
                  Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: l_orderkey (type: int), (l_orderkey + 1) (type: int), _count_of_l_orderkey (type: bigint)
    @@ -3282,7 +3282,7 @@ STAGE PLANS:
          Map Reduce
            Map Operator Tree:
                TableScan
    - alias: $hdt$_0:$hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__
    + alias: $hdt$_0:default.default__lineitem_ix_lineitem_ix_l_orderkey_idx__
                  Statistics: Num rows: 26 Data size: 2604 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
                    expressions: (l_orderkey + 2) (type: int), l_orderkey (type: int), _count_of_l_orderkey (type: bigint)
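
Both ql_rewrite_gbtoidx files exercise the group-by-to-index rewrite: the scan alias is the auto-generated index table (default__<base>_<index>__), the _count_of_* column holds the precomputed per-key count, and only the redundant inner $hdt$_0 prefix is trimmed here. A hedged sketch of the setup, with the DDL reconstructed from the index-table name rather than quoted from the test:

    -- Assumed aggregate-index DDL matching the index table named in the plan.
    CREATE INDEX lineitem_ix_lshipdate_idx ON TABLE lineitem_ix (l_shipdate)
    AS 'org.apache.hadoop.hive.ql.index.AggregateIndexHandler'
    WITH DEFERRED REBUILD IDXPROPERTIES ('AGGREGATES'='count(l_shipdate)');
    ALTER INDEX lineitem_ix_lshipdate_idx ON lineitem_ix REBUILD;

    -- A group-by over the base table can then be answered from the index,
    -- summing the precomputed _count_of_l_shipdate column instead:
    SELECT year(l_shipdate), month(l_shipdate), count(l_shipdate)
    FROM lineitem_ix
    GROUP BY year(l_shipdate), month(l_shipdate);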

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/skewjoin_mapjoin6.q.out Wed Apr 1 01:15:50 2015
    @@ -117,9 +117,8 @@ STAGE PLANS:
                          Lateral View Forward
                            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                            Select Operator
    - SELECT * : (no compute)
                              expressions: _col0 (type: string), _col1 (type: array<string>)
    - outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
    + outputColumnNames: _col0, _col1
                              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                              Lateral View Join Operator
                                outputColumnNames: _col0, _col1, _col2
    @@ -171,9 +170,8 @@ STAGE PLANS:
                          Lateral View Forward
                            Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                            Select Operator
    - SELECT * : (no compute)
                              expressions: _col0 (type: string), _col1 (type: array<string>)
    - outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
    + outputColumnNames: _col0, _col1
                              Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                              Lateral View Join Operator
                                outputColumnNames: _col0, _col1, _col2

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/skewjoinopt10.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/skewjoinopt10.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/skewjoinopt10.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/skewjoinopt10.q.out Wed Apr 1 01:15:50 2015
    @@ -110,9 +110,8 @@ STAGE PLANS:
                    Lateral View Forward
                      Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                      Select Operator
    - SELECT * : (no compute)
                        expressions: _col0 (type: string), _col1 (type: array<string>)
    - outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
    + outputColumnNames: _col0, _col1
                        Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                        Lateral View Join Operator
                          outputColumnNames: _col0, _col1, _col2
    @@ -147,9 +146,8 @@ STAGE PLANS:
                    Lateral View Forward
                      Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                      Select Operator
    - SELECT * : (no compute)
                        expressions: _col0 (type: string), _col1 (type: array<string>)
    - outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
    + outputColumnNames: _col0, _col1
                        Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
                        Lateral View Join Operator
                          outputColumnNames: _col0, _col1, _col2
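
The skewjoin_mapjoin6 and skewjoinopt10 hunks (including the spark variant further down) fix the explain rendering of the lateral-view Select Operator: it now prints the actual column names (_col0, _col1) instead of the ExprNodeColumnDesc class name, and drops the stray "SELECT * : (no compute)" line. That plan shape comes from queries like the following sketch; the table and array column are assumed, not taken from the tests.

    -- Hypothetical lateral view producing the Lateral View Forward /
    -- Select Operator / Lateral View Join Operator chain shown above.
    SELECT t.key, tag
    FROM skew_t t
    LATERAL VIEW explode(t.tags) tagged AS tag;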

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_decimal_native.q.out Wed Apr 1 01:15:50 2015
    @@ -65,8 +65,8 @@ PREHOOK: Input: default@avro_dec
      POSTHOOK: query: DESC avro_dec
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro_dec
    -name string from deserializer
    -value decimal(5,2) from deserializer
    +name string
    +value decimal(5,2)
      PREHOOK: query: INSERT OVERWRITE TABLE avro_dec SELECT name, value FROM dec
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dec
    @@ -121,8 +121,8 @@ PREHOOK: Input: default@avro_dec1
      POSTHOOK: query: DESC avro_dec1
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro_dec1
    -name string from deserializer
    -value decimal(4,1) from deserializer
    +name string
    +value decimal(4,1)
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' INTO TABLE avro_dec1
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
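
With the native Avro type mapping, DESC now reports the declared Hive types instead of the generic "from deserializer" placeholder. A minimal sketch of the table shape, with DDL assumed from the DESC output above:

    -- Assumed DDL; the *_native tests declare columns in HiveQL and let
    -- Hive derive the Avro schema, decimal precision and scale included.
    CREATE TABLE avro_dec (name string, value decimal(5,2))
    STORED AS AVRO;

    DESC avro_dec;  -- name string / value decimal(5,2), no "from deserializer"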

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins.q.out Wed Apr 1 01:15:50 2015
    @@ -31,7 +31,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -72,7 +72,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -86,10 +86,10 @@ PREHOOK: Input: default@doctors4
      POSTHOOK: query: DESCRIBE doctors4
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors4
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    -extra_field string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
    +extra_field string an extra field not in the original file
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
    @@ -166,9 +166,9 @@ PREHOOK: Input: default@episodes
      POSTHOOK: query: DESCRIBE episodes
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@episodes
    -title string from deserializer
    -air_date string from deserializer
    -doctor int from deserializer
    +title string episode title
    +air_date string initial date
    +doctor int main actor playing the Doctor in episode
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
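
Two related fixes appear in this file. First, the schema literal used the malformed attribute key "doc:"; the standard Avro field attribute is "doc", so the corrected entry inside TBLPROPERTIES ('avro.schema.literal'='...') reads:

    {
      "name": "extra_field",
      "type": "string",
      "doc": "an extra field not in the original file",
      "default": "fishfingers and custard"
    }

Second, DESCRIBE now surfaces the schema's doc strings as column comments rather than "from deserializer", which is presumably why the doctors4 and episodes listings change as well.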

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/spark/avro_joins_native.q.out Wed Apr 1 01:15:50 2015
    @@ -28,9 +28,9 @@ PREHOOK: Input: default@doctors4
      POSTHOOK: query: DESCRIBE doctors4
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors4
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
    @@ -61,9 +61,9 @@ PREHOOK: Input: default@episodes
      POSTHOOK: query: DESCRIBE episodes
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@episodes
    -title string from deserializer
    -air_date string from deserializer
    -doctor int from deserializer
    +title string episode title
    +air_date string initial date
    +doctor int main actor playing the Doctor in episode
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/spark/skewjoinopt10.q.out Wed Apr 1 01:15:50 2015
    @@ -125,9 +125,8 @@ STAGE PLANS:
                        outputColumnNames: _col0, _col1
                        Lateral View Forward
                          Select Operator
    - SELECT * : (no compute)
                            expressions: _col0 (type: string), _col1 (type: array<string>)
    - outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
    + outputColumnNames: _col0, _col1
                            Lateral View Join Operator
                              outputColumnNames: _col0, _col1, _col2
                              File Output Operator
    @@ -160,9 +159,8 @@ STAGE PLANS:
                        outputColumnNames: _col0, _col1
                        Lateral View Forward
                          Select Operator
    - SELECT * : (no compute)
                            expressions: _col0 (type: string), _col1 (type: array<string>)
    - outputColumnNames: org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc, org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc
    + outputColumnNames: _col0, _col1
                            Lateral View Join Operator
                              outputColumnNames: _col0, _col1, _col2
                              File Output Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/str_to_map.q.java1.7.out Wed Apr 1 01:15:50 2015
    @@ -31,10 +31,10 @@ STAGE PLANS:
                Select Operator
                  expressions: str_to_map('a=1,b=2,c=3',',','=')['a'] (type: string)
                  outputColumnNames: _col0
    - Statistics: Num rows: 500 Data size: 92000 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 500 Data size: 42500 Basic stats: COMPLETE Column stats: COMPLETE
                  Limit
                    Number of rows: 3
    - Statistics: Num rows: 3 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 3 Data size: 255 Basic stats: COMPLETE Column stats: COMPLETE
                    ListSink

      PREHOOK: query: select str_to_map('a=1,b=2,c=3',',','=')['a'] from src limit 3
    @@ -66,10 +66,10 @@ STAGE PLANS:
                Select Operator
                  expressions: str_to_map('a:1,b:2,c:3') (type: map<string,string>)
                  outputColumnNames: _col0
    - Statistics: Num rows: 500 Data size: 460000 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE
                  Limit
                    Number of rows: 3
    - Statistics: Num rows: 3 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE
                    ListSink

      PREHOOK: query: select str_to_map('a:1,b:2,c:3') from src limit 3
    @@ -101,10 +101,10 @@ STAGE PLANS:
                Select Operator
                  expressions: str_to_map('a:1,b:2,c:3',',',':') (type: map<string,string>)
                  outputColumnNames: _col0
    - Statistics: Num rows: 500 Data size: 460000 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 500 Data size: 377000 Basic stats: COMPLETE Column stats: COMPLETE
                  Limit
                    Number of rows: 3
    - Statistics: Num rows: 3 Data size: 2760 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 3 Data size: 2262 Basic stats: COMPLETE Column stats: COMPLETE
                    ListSink

      PREHOOK: query: select str_to_map('a:1,b:2,c:3',',',':') from src limit 3
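
For reference, str_to_map(text[, delim1, delim2]) splits text into entries on delim1 and each entry into key and value on delim2, with the delimiters defaulting to ',' and ':'. The three plans above correspond to calls of this shape (expected values noted as comments); the Data size changes in the hunks reflect revised size estimates for string and map columns, not a change in results.

    SELECT str_to_map('a=1,b=2,c=3', ',', '=')['a'] FROM src LIMIT 3;  -- '1'
    SELECT str_to_map('a:1,b:2,c:3') FROM src LIMIT 3;                 -- {"a":"1","b":"2","c":"3"}
    SELECT str_to_map('a:1,b:2,c:3', ',', ':') FROM src LIMIT 3;       -- same map, delimiters explicit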
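
The groupby_sort_1_23 changes below all stem from the sorted group-by optimization. When the source table is bucketed and sorted on the group-by key (and hive.map.groupby.sorted is enabled, as this test presumably sets), the Group By Operator runs entirely map-side in "mode: final", the Reduce Output Operator disappears, and the plan gains the usual conditional merge stages (Stage-3 through Stage-7) for the map-only output. One of the affected queries, quoted from the PREHOOK lines further down:

    INSERT OVERWRITE TABLE outputTbl5
    SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2;
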
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_1_23.q.out Wed Apr 1 01:15:50 2015
    @@ -189,7 +189,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -526,7 +526,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -784,7 +784,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -1192,7 +1192,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -1483,8 +1483,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
        Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -1501,17 +1506,39 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: 1 (type: int), _col1 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: int), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col2 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -1564,26 +1591,53 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: int), KEY._col1 (type: string)
    - mode: mergepartial
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + /t1 [$hdt$_0:t1]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    +
    + Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -1601,15 +1655,98 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl3
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    + name: default.outputtbl3
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
      #### A masked pattern was here ####
    - table:
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -1625,9 +1762,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl3
    + name: default.outputtbl3
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-2
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
    @@ -1790,7 +1932,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -2011,7 +2153,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -2278,7 +2420,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -2626,7 +2768,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
    + /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -3051,7 +3193,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
    + /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -3247,7 +3389,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1]
    + /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1]
      #### A masked pattern was here ####

        Stage: Stage-8
    @@ -4305,7 +4447,7 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:t2]
    + /t2 [$hdt$_0:t2]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -4453,8 +4595,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
        Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -4471,17 +4618,44 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -4534,26 +4708,58 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: mergepartial
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 1
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    +
    + Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
      #### A masked pattern was here ####
    + NumFilesPerFileSink: 1
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -4576,15 +4782,118 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl4
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
      #### A masked pattern was here ####
    - table:
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -4605,9 +4914,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-2
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
    @@ -4696,8 +5010,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
        Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -4714,17 +5033,39 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
    - sort order: ++++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col4 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -4777,45 +5118,16 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string), KEY._col3 (type: int)
    - mode: mergepartial
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 1
    -#### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - properties:
    - bucket_count -1
    - columns key1,key2,key3,key4,cnt
    - columns.comments
    - columns.types int:int:string:int:int
    -#### A masked pattern was here ####
    - name default.outputtbl5
    - serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    - serialization.format 1
    - serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - name: default.outputtbl5
    - TotalFiles: 1
    - GatherStats: true
    - MultiFileSpray: false

        Stage: Stage-0
          Move Operator
    @@ -4843,21 +5155,165 @@ STAGE PLANS:
          Stats-Aggr Operator
      #### A masked pattern was here ####

    -PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
    -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
    -PREHOOK: type: QUERY
    -PREHOOK: Input: default@t2
    -PREHOOK: Output: default@outputtbl5
    -POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5
    -SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
    -POSTHOOK: type: QUERY
    -POSTHOOK: Input: default@t2
    -POSTHOOK: Output: default@outputtbl5
    -POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
    -POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
    -POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
    -POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
    -POSTHOOK: Lineage: outputtbl5.key4 SIMPLE []
    + Stage: Stage-3
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + name: default.outputtbl5
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + name: default.outputtbl5
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####
    +
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    +PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
    +SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@t2
    +PREHOOK: Output: default@outputtbl5
    +POSTHOOK: query: INSERT OVERWRITE TABLE outputTbl5
    +SELECT key, 1, val, 2, count(1) FROM T2 GROUP BY key, 1, val, 2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@t2
    +POSTHOOK: Output: default@outputtbl5
    +POSTHOOK: Lineage: outputtbl5.cnt EXPRESSION [(t2)t2.null, ]
    +POSTHOOK: Lineage: outputtbl5.key1 EXPRESSION [(t2)t2.FieldSchema(name:key, type:string, comment:null), ]
    +POSTHOOK: Lineage: outputtbl5.key2 SIMPLE []
    +POSTHOOK: Lineage: outputtbl5.key3 SIMPLE [(t2)t2.FieldSchema(name:val, type:string, comment:null), ]
    +POSTHOOK: Lineage: outputtbl5.key4 SIMPLE []
      PREHOOK: query: SELECT * FROM outputTbl5
      ORDER BY key1, key2, key3, key4
      PREHOOK: type: QUERY
    @@ -4943,8 +5399,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
        Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -4961,17 +5422,44 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -5024,26 +5512,142 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: mergepartial
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    +
    + Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -5066,15 +5670,34 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl4
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Path -> Alias:
      #### A masked pattern was here ####
    - table:
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -5095,9 +5718,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-2
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
    @@ -5229,8 +5857,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
        Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -5246,19 +5879,45 @@ STAGE PLANS:
                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
    - bucketGroup: true
                      keys: _col0 (type: string), 2 (type: int), _col2 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -5311,26 +5970,58 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: mergepartial
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    +
    + Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -5353,15 +6044,118 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl4
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
      #### A masked pattern was here ####
    - table:
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -5382,9 +6176,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-2
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4

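Taken together, the groupby_sort hunks above show one plan reshaping applied twice: because the source table is bucketed and sorted on the group-by key, the Group By Operator now completes map-side in mode: final, the reduce phase (Reduce Output Operator plus the mergepartial Group By) drops out, and the plan instead gains the usual conditional merge tail, with Stage-7 as a Conditional Operator selecting among Stage-3/Stage-4/Stage-5 and Stage-6 moving the merged files into place.
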
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_6.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_6.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_6.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_6.q.out Wed Apr 1 01:15:50 2015
    @@ -425,7 +425,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1/ds=2 [$hdt$_0:$hdt$_0:t1]
    + /t1/ds=2 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java Wed Apr 1 01:15:50 2015
    @@ -24,6 +24,7 @@ import java.io.IOException;
      import java.io.InputStream;
      import java.io.PrintStream;
      import java.net.URI;
    +import java.net.URISyntaxException;
      import java.net.URLClassLoader;
      import java.sql.Timestamp;
      import java.util.ArrayList;
    @@ -32,6 +33,7 @@ import java.util.Collection;
      import java.util.HashMap;
      import java.util.HashSet;
      import java.util.LinkedHashMap;
    +import java.util.LinkedList;
      import java.util.List;
      import java.util.Map;
      import java.util.Set;
    @@ -269,6 +271,9 @@ public class SessionState {
         */
        private Timestamp queryCurrentTimestamp;

    + private ResourceMaps resourceMaps;
    +
    + private DependencyResolver dependencyResolver;
        /**
         * Get the lineage state stored in this session.
         *
    @@ -334,6 +339,8 @@ public class SessionState {
          this.userName = userName;
          isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT);
          ls = new LineageState();
    + resourceMaps = new ResourceMaps();
    + dependencyResolver = new DependencyResolver();
          // Must be deterministic order map for consistent q-test output across Java versions
          overriddenConfigurations = new LinkedHashMap<String, String>();
          overriddenConfigurations.putAll(HiveConf.getConfSystemProperties());
    @@ -1119,8 +1126,7 @@ public class SessionState {
          return null;
        }

    - private final HashMap<ResourceType, Set<String>> resource_map =
    - new HashMap<ResourceType, Set<String>>();
    +

        public String add_resource(ResourceType t, String value) throws RuntimeException {
          return add_resource(t, value, false);
    @@ -1143,37 +1149,88 @@ public class SessionState {

        public List<String> add_resources(ResourceType t, Collection<String> values, boolean convertToUnix)
            throws RuntimeException {
    - Set<String> resourceMap = getResourceMap(t);
    -
    + Set<String> resourceSet = resourceMaps.getResourceSet(t);
    + Map<String, Set<String>> resourcePathMap = resourceMaps.getResourcePathMap(t);
    + Map<String, Set<String>> reverseResourcePathMap = resourceMaps.getReverseResourcePathMap(t);
          List<String> localized = new ArrayList<String>();
          try {
            for (String value : values) {
    - localized.add(downloadResource(value, convertToUnix));
    - }
    + String key;
    +
    + // get the local paths of the downloaded jars.
    + List<URI> downloadedURLs = resolveAndDownload(t, value, convertToUnix);
    +
    + if (getURLType(value).equals("ivy")) {
    + // get the key to store in map
    + key = new URI(value).getAuthority();
    + } else {
    + // for local file and hdfs, key and value are same.
    + key = downloadedURLs.get(0).toString();
    + }
    + Set<String> downloadedValues = new HashSet<String>();
    +
    + for (URI uri : downloadedURLs) {
    + String resourceValue = uri.toString();
    + downloadedValues.add(resourceValue);
    + localized.add(resourceValue);
    + if (reverseResourcePathMap.containsKey(resourceValue)) {
    + if (!reverseResourcePathMap.get(resourceValue).contains(key)) {
    + reverseResourcePathMap.get(resourceValue).add(key);
    + }
    + } else {
    + Set<String> addSet = new HashSet<String>();
    + addSet.add(key);
    + reverseResourcePathMap.put(resourceValue, addSet);

    - t.preHook(resourceMap, localized);
    + }
    + }
    + resourcePathMap.put(key, downloadedValues);
    + }
    + t.preHook(resourceSet, localized);

          } catch (RuntimeException e) {
    - getConsole().printError(e.getMessage(), "\n"
    - + org.apache.hadoop.util.StringUtils.stringifyException(e));
    + getConsole().printError(e.getMessage(), "\n" + org.apache.hadoop.util.StringUtils.stringifyException(e));
            throw e;
    + } catch (URISyntaxException e) {
    + getConsole().printError(e.getMessage());
    + throw new RuntimeException(e);
    + } catch (IOException e) {
    + getConsole().printError(e.getMessage());
    + throw new RuntimeException(e);
          }
    -
          getConsole().printInfo("Added resources: " + values);
    - resourceMap.addAll(localized);
    -
    + resourceSet.addAll(localized);
          return localized;
        }

    - private Set<String> getResourceMap(ResourceType t) {
    - Set<String> result = resource_map.get(t);
    - if (result == null) {
    - result = new HashSet<String>();
    - resource_map.put(t, result);
    + private static String getURLType(String value) throws URISyntaxException {
    + URI uri = new URI(value);
    + String scheme = uri.getScheme() == null ? null : uri.getScheme().toLowerCase();
    + if (scheme == null || scheme.equals("file")) {
    + return "file";
    + } else if (scheme.equals("hdfs") || scheme.equals("ivy")) {
    + return scheme;
    + } else {
    + throw new RuntimeException("invalid url: " + uri + ", expecting ( file | hdfs | ivy) as url scheme. ");
    + }
    + }
    +
    + List<URI> resolveAndDownload(ResourceType t, String value, boolean convertToUnix) throws URISyntaxException,
    + IOException {
    + URI uri = new URI(value);
    + if (getURLType(value).equals("file")) {
    + return Arrays.asList(uri);
    + } else if (getURLType(value).equals("ivy")) {
    + return dependencyResolver.downloadDependencies(uri);
    + } else if (getURLType(value).equals("hdfs")) {
    + return Arrays.asList(new URI(downloadResource(value, convertToUnix)));
    + } else {
    + throw new RuntimeException("Invalid url " + uri);
          }
    - return result;
        }

    +
    +
        /**
         * Returns true if it is from any external File Systems except local
         */
    @@ -1218,16 +1275,49 @@ public class SessionState {
          return value;
        }

    - public void delete_resources(ResourceType t, List<String> value) {
    - Set<String> resources = resource_map.get(t);
    - if (resources != null && !resources.isEmpty()) {
    - t.postHook(resources, value);
    - resources.removeAll(value);
    + public void delete_resources(ResourceType t, List<String> values) {
    + Set<String> resources = resourceMaps.getResourceSet(t);
    + if (resources == null || resources.isEmpty()) {
    + return;
    + }
    +
    + Map<String, Set<String>> resourcePathMap = resourceMaps.getResourcePathMap(t);
    + Map<String, Set<String>> reverseResourcePathMap = resourceMaps.getReverseResourcePathMap(t);
    + List<String> deleteList = new LinkedList<String>();
    + for (String value : values) {
    + String key = value;
    + try {
    + if (getURLType(value).equals("ivy")) {
    + key = new URI(value).getAuthority();
    + }
    + } catch (URISyntaxException e) {
    + throw new RuntimeException("Invalid uri string " + value + ", " + e.getMessage());
    + }
    +
    + // get all the dependencies to delete
    +
    + Set<String> resourcePaths = resourcePathMap.get(key);
    + if (resourcePaths == null) {
    + // nothing was registered under this key; skip it but keep processing
    + // the remaining values so their dependencies are still cleaned up.
    + continue;
    + }
    + for (String resourceValue : resourcePaths) {
    + reverseResourcePathMap.get(resourceValue).remove(key);
    +
    + // delete a dependency only if no other resource depends on it.
    + if (reverseResourcePathMap.get(resourceValue).isEmpty()) {
    + deleteList.add(resourceValue);
    + reverseResourcePathMap.remove(resourceValue);
    + }
    + }
    + resourcePathMap.remove(key);
          }
    + t.postHook(resources, deleteList);
    + resources.removeAll(deleteList);
        }

    +
        public Set<String> list_resource(ResourceType t, List<String> filter) {
    - Set<String> orig = resource_map.get(t);
    + Set<String> orig = resourceMaps.getResourceSet(t);
          if (orig == null) {
            return null;
          }
    @@ -1245,10 +1335,10 @@ public class SessionState {
        }

        public void delete_resources(ResourceType t) {
    - Set<String> resources = resource_map.get(t);
    + Set<String> resources = resourceMaps.getResourceSet(t);
          if (resources != null && !resources.isEmpty()) {
            delete_resources(t, new ArrayList<String>(resources));
    - resource_map.remove(t);
    + resourceMaps.getResourceMap().remove(t);
          }
        }

    @@ -1512,3 +1602,51 @@ public class SessionState {
          return queryCurrentTimestamp;
        }
      }
    +
    +class ResourceMaps {
    +
    + private final Map<SessionState.ResourceType, Set<String>> resource_map;
    + // Maps each resource key (the jar/ivy coordinate passed to add) to all of its transitively downloaded paths. Used for deleting transitive dependencies.
    + private final Map<SessionState.ResourceType, Map<String, Set<String>>> resource_path_map;
    + // Reverse map: each downloaded path to the set of resource keys that depend on it. Used for deleting transitive dependencies.
    + private final Map<SessionState.ResourceType, Map<String, Set<String>>> reverse_resource_path_map;
    +
    + public ResourceMaps() {
    + resource_map = new HashMap<SessionState.ResourceType, Set<String>>();
    + resource_path_map = new HashMap<SessionState.ResourceType, Map<String, Set<String>>>();
    + reverse_resource_path_map = new HashMap<SessionState.ResourceType, Map<String, Set<String>>>();
    +
    + }
    +
    + public Map<SessionState.ResourceType, Set<String>> getResourceMap() {
    + return resource_map;
    + }
    +
    + public Set<String> getResourceSet(SessionState.ResourceType t) {
    + Set<String> result = resource_map.get(t);
    + if (result == null) {
    + result = new HashSet<String>();
    + resource_map.put(t, result);
    + }
    + return result;
    + }
    +
    + public Map<String, Set<String>> getResourcePathMap(SessionState.ResourceType t) {
    + Map<String, Set<String>> result = resource_path_map.get(t);
    + if (result == null) {
    + result = new HashMap<String, Set<String>>();
    + resource_path_map.put(t, result);
    + }
    + return result;
    + }
    +
    + public Map<String, Set<String>> getReverseResourcePathMap(SessionState.ResourceType t) {
    + Map<String, Set<String>> result = reverse_resource_path_map.get(t);
    + if (result == null) {
    + result = new HashMap<String, Set<String>>();
    + reverse_resource_path_map.put(t, result);
    + }
    + return result;
    + }
    +
    +}

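For orientation, the ResourceMaps bookkeeping added above amounts to reference counting over two maps: resourcePathMap records every path a key (for example an ivy:// coordinate) pulled in, and reverseResourcePathMap records which keys still need a given path, so delete_resources only removes a downloaded dependency once its last referrer is gone. A minimal standalone sketch of that scheme, with hypothetical names and no Hive dependencies:

    import java.util.*;

    // Minimal sketch of the reverse-map reference counting above
    // (hypothetical names; not the actual Hive API).
    public class ResourceRefCount {
      // resource key (e.g. an ivy coordinate) -> paths it pulled in transitively
      private final Map<String, Set<String>> pathsByKey = new HashMap<>();
      // downloaded path -> keys that still depend on it
      private final Map<String, Set<String>> keysByPath = new HashMap<>();

      public void add(String key, Collection<String> downloadedPaths) {
        pathsByKey.put(key, new HashSet<>(downloadedPaths));
        for (String path : downloadedPaths) {
          keysByPath.computeIfAbsent(path, p -> new HashSet<>()).add(key);
        }
      }

      // Returns only the paths whose last referrer was just removed; a
      // dependency shared with another key survives, as in delete_resources.
      public List<String> delete(String key) {
        List<String> deletable = new LinkedList<>();
        Set<String> paths = pathsByKey.remove(key);
        if (paths == null) {
          return deletable;
        }
        for (String path : paths) {
          Set<String> referrers = keysByPath.get(path);
          referrers.remove(key);
          if (referrers.isEmpty()) {   // no other resource depends on it
            keysByPath.remove(path);
            deletable.add(path);
          }
        }
        return deletable;
      }

      public static void main(String[] args) {
        ResourceRefCount rc = new ResourceRefCount();
        rc.add("org.example:a:1.0", Arrays.asList("/tmp/a.jar", "/tmp/shared.jar"));
        rc.add("org.example:b:1.0", Arrays.asList("/tmp/b.jar", "/tmp/shared.jar"));
        System.out.println(rc.delete("org.example:a:1.0")); // only /tmp/a.jar
        System.out.println(rc.delete("org.example:b:1.0")); // b.jar and shared.jar
      }
    }
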
    Modified: hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFormatNumber.java
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFormatNumber.java?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFormatNumber.java (original)
    +++ hive/branches/llap/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFFormatNumber.java Wed Apr 1 01:15:50 2015
    @@ -147,7 +147,13 @@ public class GenericUDFFormatNumber exte

        @Override
        public Object evaluate(DeferredObject[] arguments) throws HiveException {
    - int dValue = ((IntObjectInspector) argumentOIs[1]).get(arguments[1].get());
    + Object arg0;
    + Object arg1;
    + if ((arg0 = arguments[0].get()) == null || (arg1 = arguments[1].get()) == null) {
    + return null;
    + }
    +
    + int dValue = ((IntObjectInspector) argumentOIs[1]).get(arg1);

          if (dValue < 0) {
            throw new HiveException("Argument 2 of function FORMAT_NUMBER must be >= 0, but \""
    @@ -181,26 +187,26 @@ public class GenericUDFFormatNumber exte
          switch (xObjectInspector.getPrimitiveCategory()) {
            case VOID:
            case DOUBLE:
    - xDoubleValue = ((DoubleObjectInspector) argumentOIs[0]).get(arguments[0].get());
    + xDoubleValue = ((DoubleObjectInspector) argumentOIs[0]).get(arg0);
              resultText.set(numberFormat.format(xDoubleValue));
              break;
            case FLOAT:
    - xFloatValue = ((FloatObjectInspector) argumentOIs[0]).get(arguments[0].get());
    + xFloatValue = ((FloatObjectInspector) argumentOIs[0]).get(arg0);
              resultText.set(numberFormat.format(xFloatValue));
              break;
            case DECIMAL:
              xDecimalValue = ((HiveDecimalObjectInspector) argumentOIs[0])
    - .getPrimitiveJavaObject(arguments[0].get());
    + .getPrimitiveJavaObject(arg0);
              resultText.set(numberFormat.format(xDecimalValue.bigDecimalValue()));
              break;
            case BYTE:
            case SHORT:
            case INT:
    - xIntValue = ((IntObjectInspector) argumentOIs[0]).get(arguments[0].get());
    + xIntValue = ((IntObjectInspector) argumentOIs[0]).get(arg0);
              resultText.set(numberFormat.format(xIntValue));
              break;
            case LONG:
    - xLongValue = ((LongObjectInspector) argumentOIs[0]).get(arguments[0].get());
    + xLongValue = ((LongObjectInspector) argumentOIs[0]).get(arg0);
              resultText.set(numberFormat.format(xLongValue));
              break;
            default:

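The FORMAT_NUMBER change above is the standard GenericUDF null-propagation guard: fetch both deferred arguments first, and return NULL before touching any object inspector if either is NULL (the behavior the new udf_format_number.q cases later in this mail exercise). A stripped-down sketch of just that guard, with hypothetical types standing in for Hive's DeferredObject and inspectors:

    // Stripped-down illustration of the null guard above (hypothetical types;
    // the real code reads values through Hive ObjectInspectors).
    public class NullGuard {
      interface Deferred { Object get(); }

      static Object evaluate(Deferred[] arguments) {
        Object arg0;
        Object arg1;
        // Short-circuit before any cast or formatting work, mirroring SQL
        // NULL propagation: NULL in, NULL out.
        if ((arg0 = arguments[0].get()) == null || (arg1 = arguments[1].get()) == null) {
          return null;
        }
        int decimalPlaces = (Integer) arg1;
        return String.format("%,." + decimalPlaces + "f", ((Number) arg0).doubleValue());
      }

      public static void main(String[] args) {
        Deferred num = () -> 12332.123456;
        Deferred places = () -> 4;
        Deferred nil = () -> null;
        System.out.println(evaluate(new Deferred[] { num, places })); // 12,332.1235 (en_US locale)
        System.out.println(evaluate(new Deferred[] { nil, places })); // null
        System.out.println(evaluate(new Deferred[] { num, nil }));    // null
      }
    }
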
    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/avro_compression_enabled.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/avro_compression_enabled.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/avro_compression_enabled.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/avro_compression_enabled.q Wed Apr 1 01:15:50 2015
    @@ -29,7 +29,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]

    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/avro_evolved_schemas.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/avro_evolved_schemas.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/avro_evolved_schemas.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/avro_evolved_schemas.q Wed Apr 1 01:15:50 2015
    @@ -30,7 +30,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]

    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/avro_joins.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/avro_joins.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/avro_joins.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/avro_joins.q Wed Apr 1 01:15:50 2015
    @@ -31,7 +31,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]

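All three avro_*.q fixes above are the same one-character repair: with the attribute misspelled as "doc:", Avro's parser files it away as an unknown custom property and the field ends up with no doc string at all. A small sketch against org.apache.avro.Schema.Parser showing the difference (record trimmed to the one affected field and renamed here; assumes the standard avro artifact on the classpath):

    import org.apache.avro.Schema;

    public class AvroDocCheck {
      public static void main(String[] args) {
        // Corrected field definition: "doc", not "doc:".
        String literal = "{\"name\": \"example\", \"type\": \"record\", \"fields\": ["
            + "{\"name\": \"extra_field\", \"type\": \"string\","
            + " \"doc\": \"an extra field not in the original file\","
            + " \"default\": \"fishfingers and custard\"}]}";
        Schema schema = new Schema.Parser().parse(literal);
        // Prints the doc string; with the old "doc:" spelling this should
        // print null, since the attribute lands in the field's custom props.
        System.out.println(schema.getField("extra_field").doc());
      }
    }
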
    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/leadlag.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/leadlag.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/leadlag.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/leadlag.q Wed Apr 1 01:15:50 2015
    @@ -36,7 +36,10 @@ sum(p_size - lag(p_size,1)) over w1 as d
      from part
      window w1 as (distribute by p_mfgr sort by p_name rows between 2 preceding and 2 following) ;

    +set hive.cbo.enable=false;
      -- 6. testRankInLead
    +-- disable cbo because of CALCITE-653
    +
      select p_mfgr, p_name, p_size, r1,
      lead(r1,1,r1) over (distribute by p_mfgr sort by p_name) as deltaRank
      from (
    @@ -45,6 +48,7 @@ rank() over(distribute by p_mfgr sort b
      from part
      ) a;

    +set hive.cbo.enable=true;
      -- 7. testLeadWithPTF
      select p_mfgr, p_name,
      rank() over(distribute by p_mfgr sort by p_name) as r,

    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/parquet_columnar.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/parquet_columnar.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/parquet_columnar.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/parquet_columnar.q Wed Apr 1 01:15:50 2015
    @@ -5,8 +5,8 @@ DROP TABLE IF EXISTS parquet_columnar_ac
      DROP TABLE IF EXISTS parquet_columnar_renamed;

      CREATE TABLE parquet_columnar_access_stage (
    - s string,
    - i int,
    + s string,
    + i int,
          f float
        ) ROW FORMAT DELIMITED
        FIELDS TERMINATED BY '|';
    @@ -15,12 +15,14 @@ CREATE TABLE parquet_columnar_access (
          s string,
          x int,
          y int,
    - f float
    + f float,
    + address struct<intVals:int,strVals:string>
        ) STORED AS PARQUET;

      LOAD DATA LOCAL INPATH '../../data/files/parquet_columnar.txt' OVERWRITE INTO TABLE parquet_columnar_access_stage;

    -INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f FROM parquet_columnar_access_stage;
    +INSERT OVERWRITE TABLE parquet_columnar_access SELECT s, i, (i + 1), f, named_struct('intVals',
    +i,'strVals',s) FROM parquet_columnar_access_stage;
      SELECT * FROM parquet_columnar_access;

      ALTER TABLE parquet_columnar_access REPLACE COLUMNS (s1 string, x1 int, y1 int, f1 float);

    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/udaf_percentile_approx_23.q Wed Apr 1 01:15:50 2015
    @@ -88,11 +88,11 @@ select * from t12;
      set hive.cbo.enable=false;

      -- NaN
    -explain
    +explain
      select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket;
      select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket;

      -- with CBO
    -explain
    -select percentile_approx(key, 0.5) from bucket;
    +explain
      select percentile_approx(key, 0.5) from bucket;
    +select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket;

    Modified: hive/branches/llap/ql/src/test/queries/clientpositive/udf_format_number.q
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/queries/clientpositive/udf_format_number.q?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/queries/clientpositive/udf_format_number.q (original)
    +++ hive/branches/llap/ql/src/test/queries/clientpositive/udf_format_number.q Wed Apr 1 01:15:50 2015
    @@ -71,3 +71,9 @@ SELECT format_number(12332.123456BD, 4),
          format_number(-12332.2BD, 0),
          format_number(CAST(12332.567 AS DECIMAL(8, 1)), 4)
      FROM src tablesample (1 rows);
    +
    +-- nulls
    +SELECT
    + format_number(cast(null as int), 0),
    + format_number(12332.123456BD, cast(null as int)),
    + format_number(cast(null as int), cast(null as int));

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/alter_partition_coltype.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/alter_partition_coltype.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/alter_partition_coltype.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/alter_partition_coltype.q.out Wed Apr 1 01:15:50 2015
    @@ -568,7 +568,7 @@ STAGE PLANS:
                    name: default.alter_coltype
                  name: default.alter_coltype
            Truncated Path -> Alias:
    - /alter_coltype/dt=100/ts=3.0 [$hdt$_0:$hdt$_0:alter_coltype]
    + /alter_coltype/dt=100/ts=3.0 [$hdt$_0:alter_coltype]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/annotate_stats_select.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/annotate_stats_select.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/annotate_stats_select.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/annotate_stats_select.q.out Wed Apr 1 01:15:50 2015
    @@ -531,7 +531,7 @@ STAGE PLANS:
                alias: alltypes_orc
                Statistics: Num rows: 2 Data size: 1686 Basic stats: COMPLETE Column stats: COMPLETE
                Select Operator
    - expressions: CAST( '58.174' AS decimal(10,0)) (type: decimal(10,0))
    + expressions: 58 (type: decimal(10,0))
                  outputColumnNames: _col0
                  Statistics: Num rows: 2 Data size: 224 Basic stats: COMPLETE Column stats: COMPLETE
                  ListSink
    @@ -556,10 +556,10 @@ STAGE PLANS:
                  Select Operator
                    expressions: array(1,2,3) (type: array<int>)
                    outputColumnNames: _col0
    - Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
    - Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 112 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -591,10 +591,10 @@ STAGE PLANS:
                  Select Operator
                    expressions: str_to_map('a=1 b=2 c=3',' ','=') (type: map<string,string>)
                    outputColumnNames: _col0
    - Statistics: Num rows: 2 Data size: 1840 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
                    File Output Operator
                      compressed: false
    - Statistics: Num rows: 2 Data size: 1840 Basic stats: COMPLETE Column stats: COMPLETE
    + Statistics: Num rows: 2 Data size: 1508 Basic stats: COMPLETE Column stats: COMPLETE
                      table:
                          input format: org.apache.hadoop.mapred.TextInputFormat
                          output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

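The auto_join*.q.out hunks from here on are one mechanical change repeated: map-local table aliases lose a level of $hdt$ nesting (for example $hdt$_0:$hdt$_0:src1 becomes $hdt$_0:src1), the same alias shortening seen in the Truncated Path -> Alias lines earlier in this mail.
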
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join1.q.out Wed Apr 1 01:15:50 2015
    @@ -24,11 +24,11 @@ STAGE PLANS:
        Stage: Stage-5
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:src1
    + $hdt$_0:src1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:src1
    + $hdt$_0:src1
                TableScan
                  alias: src1
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join10.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join10.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join10.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join10.q.out Wed Apr 1 01:15:50 2015
    @@ -23,11 +23,11 @@ STAGE PLANS:
        Stage: Stage-5
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:src
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join11.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join11.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join11.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join11.q.out Wed Apr 1 01:15:50 2015
    @@ -23,11 +23,11 @@ STAGE PLANS:
        Stage: Stage-5
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:src
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join12.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join12.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join12.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join12.q.out Wed Apr 1 01:15:50 2015
    @@ -29,14 +29,14 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:$hdt$_0:src
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:$hdt$_0:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -51,7 +51,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src
    + $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_1:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join13.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join13.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join13.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join13.q.out Wed Apr 1 01:15:50 2015
    @@ -29,14 +29,14 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:src
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:src
    + $hdt$_0:$hdt$_1:$hdt$_1:src
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:src
    + $hdt$_0:$hdt$_0:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -51,7 +51,7 @@ STAGE PLANS:
                        keys:
                          0 UDFToDouble(_col0) (type: double)
                          1 (UDFToDouble(_col2) + UDFToDouble(_col0)) (type: double)
    - $hdt$_0:$hdt$_0:$hdt$_1:$hdt$_1:src
    + $hdt$_0:$hdt$_1:$hdt$_1:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join14.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join14.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join14.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join14.q.out Wed Apr 1 01:15:50 2015
    @@ -28,11 +28,11 @@ STAGE PLANS:
        Stage: Stage-5
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:src
    + $hdt$_1:src
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:src
    + $hdt$_1:src
                TableScan
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join22.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join22.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join22.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join22.q.out Wed Apr 1 01:15:50 2015
    @@ -13,14 +13,14 @@ STAGE PLANS:
        Stage: Stage-8
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src4
    + $hdt$_0:$hdt$_0:$hdt$_0:src4
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:src4
    + $hdt$_0:$hdt$_1:src4
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:$hdt$_0:src4
    + $hdt$_0:$hdt$_0:$hdt$_0:src4
                TableScan
                  alias: src4
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -35,7 +35,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_0:$hdt$_0:$hdt$_1:src4
    + $hdt$_0:$hdt$_1:src4
                TableScan
                  alias: src4
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join26.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join26.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join26.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join26.q.out Wed Apr 1 01:15:50 2015
    @@ -28,11 +28,11 @@ STAGE PLANS:
        Stage: Stage-6
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_1:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$hdt$_1:x
    + $hdt$_0:$hdt$_1:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join_nulls.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join_nulls.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join_nulls.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join_nulls.q.out Wed Apr 1 01:15:50 2015
    @@ -34,7 +34,7 @@ POSTHOOK: type: QUERY
      POSTHOOK: Input: default@myinput1
      #### A masked pattern was here ####
      13630578
    -Warning: Map Join MAPJOIN[18][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
    +Warning: Map Join MAPJOIN[17][bigTable=?] in task 'Stage-2:MAPRED' is a cross product
      PREHOOK: query: SELECT sum(hash(a.key,a.value,b.key,b.value)) FROM myinput1 a RIGHT OUTER JOIN myinput1 b
      PREHOOK: type: QUERY
      PREHOOK: Input: default@myinput1

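Only the operator id in the cross-product warning moves (MAPJOIN[18] to MAPJOIN[17]); ids are assigned in plan order, so they shift whenever earlier operators are added or removed. The warning itself still fires for any map join that lacks an equality key, for example (a sketch reusing the test's table name, not its exact query):

    -- No ON clause, so the join degenerates to a cross product and
    -- Hive emits the "is a cross product" warning seen above.
    SELECT sum(hash(a.key, a.value, b.key, b.value))
    FROM myinput1 a
    JOIN myinput1 b;
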
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/auto_join_without_localtask.q.out Wed Apr 1 01:15:50 2015
    @@ -270,11 +270,11 @@ STAGE PLANS:
        Stage: Stage-14
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:a
    + $hdt$_1:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:a
    + $hdt$_1:a
                TableScan
                  alias: a
                  Filter Operator
    @@ -319,11 +319,11 @@ STAGE PLANS:
        Stage: Stage-12
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_2:a
    + $hdt$_2:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_2:a
    + $hdt$_2:a
                TableScan
                  alias: a
                  Filter Operator
    @@ -406,11 +406,11 @@ STAGE PLANS:
            Local Work:
              Map Reduce Local Work
                Alias -> Map Local Tables:
    - $hdt$_0:$INTNAME
    + $INTNAME
                    Fetch Operator
                      limit: -1
                Alias -> Map Local Operator Tree:
    - $hdt$_0:$INTNAME
    + $INTNAME
                    TableScan

        Stage: Stage-2
    @@ -457,11 +457,11 @@ STAGE PLANS:
        Stage: Stage-15
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:a
    + $hdt$_0:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:a
    + $hdt$_0:a
                TableScan
                  alias: a
                  Filter Operator
    @@ -639,11 +639,11 @@ STAGE PLANS:
        Stage: Stage-13
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_2:a
    + $hdt$_1:$hdt$_2:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_2:a
    + $hdt$_1:$hdt$_2:a
                TableScan
                  alias: a
                  Filter Operator
    @@ -714,11 +714,11 @@ STAGE PLANS:
            Local Work:
              Map Reduce Local Work
                Alias -> Map Local Tables:
    - $hdt$_0:$INTNAME
    + $INTNAME
                    Fetch Operator
                      limit: -1
                Alias -> Map Local Operator Tree:
    - $hdt$_0:$INTNAME
    + $INTNAME
                    TableScan

        Stage: Stage-2
    @@ -748,11 +748,11 @@ STAGE PLANS:
        Stage: Stage-12
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:a
    + $hdt$_0:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:a
    + $hdt$_0:a
                TableScan
                  alias: a
                  Filter Operator
    @@ -836,11 +836,11 @@ STAGE PLANS:
        Stage: Stage-14
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_1:$hdt$_1:a
    + $hdt$_1:$hdt$_1:a
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_1:$hdt$_1:a
    + $hdt$_1:$hdt$_1:a
                TableScan
                  alias: a
                  Filter Operator

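Every hunk in auto_join_without_localtask is the same one-segment shortening of the generated alias, applied across the conditional stages (Stage-12/13/14/15) that auto map join keeps as fallbacks in case a hash-table build fails. A rough sketch of the kind of setup that produces this stage layout (only the standard auto-join setting is assumed; the query shape is illustrative):

    SET hive.auto.convert.join = true;
    -- Each small-table candidate gets a Map Reduce Local Work stage
    -- (the $hdt$_N:a entries above) plus a conditional MR fallback.
    SELECT a.*
    FROM src a
    JOIN src b ON a.key = b.key
    JOIN src c ON b.value = c.value;
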
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column.q.out Wed Apr 1 01:15:50 2015
    @@ -24,8 +24,8 @@ PREHOOK: Input: default@doctors
      POSTHOOK: query: DESCRIBE doctors
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors
    -number int from deserializer
    -first_name string from deserializer
    +number int
    +first_name string
      PREHOOK: query: ALTER TABLE doctors ADD COLUMNS (last_name string)
      PREHOOK: type: ALTERTABLE_ADDCOLS
      PREHOOK: Input: default@doctors
    @@ -40,9 +40,9 @@ PREHOOK: Input: default@doctors
      POSTHOOK: query: DESCRIBE doctors
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int
    +first_name string
    +last_name string
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column2.q.out Wed Apr 1 01:15:50 2015
    @@ -50,8 +50,8 @@ POSTHOOK: query: INSERT INTO TABLE docto
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@doctors
      POSTHOOK: Output: default@doctors_copy
    -POSTHOOK: Lineage: doctors_copy.first_name SIMPLE [(doctors)doctors.FieldSchema(name:first_name, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: doctors_copy.number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:from deserializer), ]
    +POSTHOOK: Lineage: doctors_copy.first_name SIMPLE [(doctors)doctors.FieldSchema(name:first_name, type:string, comment:), ]
    +POSTHOOK: Lineage: doctors_copy.number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:), ]
      PREHOOK: query: ALTER TABLE doctors_copy ADD COLUMNS (last_name string)
      PREHOOK: type: ALTERTABLE_ADDCOLS
      PREHOOK: Input: default@doctors_copy
    @@ -74,9 +74,9 @@ PREHOOK: Input: default@doctors_copy
      POSTHOOK: query: DESCRIBE doctors_copy
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors_copy
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int
    +first_name string
    +last_name string
      PREHOOK: query: SELECT * FROM doctors_copy
      PREHOOK: type: QUERY
      PREHOOK: Input: default@doctors_copy

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column3.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column3.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column3.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_add_column3.q.out Wed Apr 1 01:15:50 2015
    @@ -52,8 +52,8 @@ POSTHOOK: query: INSERT INTO TABLE docto
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@doctors
      POSTHOOK: Output: default@doctors_copy@part=1
    -POSTHOOK: Lineage: doctors_copy PARTITION(part=1).first_name SIMPLE [(doctors)doctors.FieldSchema(name:first_name, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: doctors_copy PARTITION(part=1).number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:from deserializer), ]
    +POSTHOOK: Lineage: doctors_copy PARTITION(part=1).first_name SIMPLE [(doctors)doctors.FieldSchema(name:first_name, type:string, comment:), ]
    +POSTHOOK: Lineage: doctors_copy PARTITION(part=1).number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:), ]
      PREHOOK: query: ALTER TABLE doctors_copy ADD COLUMNS (last_name string)
      PREHOOK: type: ALTERTABLE_ADDCOLS
      PREHOOK: Input: default@doctors_copy
    @@ -68,9 +68,9 @@ PREHOOK: Input: default@doctors_copy
      POSTHOOK: query: DESCRIBE doctors_copy
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors_copy
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int
    +first_name string
    +last_name string
      part int

      # Partition Information

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_change_schema.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_change_schema.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_change_schema.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_change_schema.q.out Wed Apr 1 01:15:50 2015
    @@ -38,8 +38,8 @@ PREHOOK: Input: default@avro2
      POSTHOOK: query: DESCRIBE avro2
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro2
    -string1 string from deserializer
    -string2 string from deserializer
    +string1 string
    +string2 string
      PREHOOK: query: ALTER TABLE avro2 SET TBLPROPERTIES ('avro.schema.literal'='{ "namespace": "org.apache.hive",
        "name": "second_schema",
        "type": "record",
    @@ -68,6 +68,6 @@ PREHOOK: Input: default@avro2
      POSTHOOK: query: DESCRIBE avro2
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro2
    -int1 int from deserializer
    -float1 float from deserializer
    -double1 double from deserializer
    +int1 int
    +float1 float
    +double1 double

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_compression_enabled.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_compression_enabled.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_compression_enabled.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_compression_enabled.q.out Wed Apr 1 01:15:50 2015
    @@ -29,7 +29,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -68,7 +68,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]

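The only change to avro_compression_enabled is inside the embedded Avro schema: the attribute key was misspelled "doc:" (a stray colon inside the key), so Avro parsed it as an unrecognized extra property rather than the field's documentation string. With the key corrected to "doc", the text is picked up as real field documentation. The fixed field entry, as it appears in the schema literal:

    {
      "name": "extra_field",
      "type": "string",
      "doc": "an extra field not in the original file",
      "default": "fishfingers and custard"
    }
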
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal.q.out Wed Apr 1 01:15:50 2015
    @@ -79,8 +79,8 @@ PREHOOK: Input: default@avro_dec
      POSTHOOK: query: DESC avro_dec
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro_dec
    -name string from deserializer
    -value decimal(5,2) from deserializer
    +name string
    +value decimal(5,2)
      PREHOOK: query: INSERT OVERWRITE TABLE avro_dec select name, value from dec
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dec
    @@ -153,8 +153,8 @@ PREHOOK: Input: default@avro_dec1
      POSTHOOK: query: DESC avro_dec1
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro_dec1
    -name string from deserializer
    -value decimal(4,1) from deserializer
    +name string
    +value decimal(4,1)
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' into TABLE avro_dec1
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

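Besides dropping the placeholder comment, DESC now reports the declared precision and scale directly (decimal(5,2), decimal(4,1)). On the Avro side, decimal is a logical type annotating bytes or fixed, so a field matching "value decimal(5,2)" would be declared along these lines (per the Avro specification; the field name is reused from the test):

    {
      "name": "value",
      "type": {
        "type": "bytes",
        "logicalType": "decimal",
        "precision": 5,
        "scale": 2
      }
    }
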
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_decimal_native.q.out Wed Apr 1 01:15:50 2015
    @@ -65,8 +65,8 @@ PREHOOK: Input: default@avro_dec
      POSTHOOK: query: DESC avro_dec
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro_dec
    -name string from deserializer
    -value decimal(5,2) from deserializer
    +name string
    +value decimal(5,2)
      PREHOOK: query: INSERT OVERWRITE TABLE avro_dec SELECT name, value FROM dec
      PREHOOK: type: QUERY
      PREHOOK: Input: default@dec
    @@ -121,8 +121,8 @@ PREHOOK: Input: default@avro_dec1
      POSTHOOK: query: DESC avro_dec1
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@avro_dec1
    -name string from deserializer
    -value decimal(4,1) from deserializer
    +name string
    +value decimal(4,1)
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/dec.avro' INTO TABLE avro_dec1
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_evolved_schemas.q.out Wed Apr 1 01:15:50 2015
    @@ -30,7 +30,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -70,7 +70,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -84,10 +84,10 @@ PREHOOK: Input: default@doctors_with_new
      POSTHOOK: query: DESCRIBE doctors_with_new_field
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors_with_new_field
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    -extra_field string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
    +extra_field string an extra field not in the original file
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors_with_new_field
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

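Here the new behavior is visible end to end: each Avro field's "doc" string now flows through as the Hive column comment, replacing the generic "from deserializer" placeholder the Avro SerDe used to report. The mapping is one-to-one (field name to column name, Avro type to Hive type, "doc" to comment), so a field such as

    {"name": "number", "type": "int", "doc": "Order of playing the role"}

comes back from DESCRIBE as "number  int  Order of playing the role", exactly as shown above.
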
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_joins.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_joins.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_joins.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_joins.q.out Wed Apr 1 01:15:50 2015
    @@ -31,7 +31,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -72,7 +72,7 @@ TBLPROPERTIES ('avro.schema.literal'='{
          {
            "name":"extra_field",
            "type":"string",
    - "doc:":"an extra field not in the original file",
    + "doc":"an extra field not in the original file",
            "default":"fishfingers and custard"
          }
        ]
    @@ -86,10 +86,10 @@ PREHOOK: Input: default@doctors4
      POSTHOOK: query: DESCRIBE doctors4
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors4
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    -extra_field string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
    +extra_field string an extra field not in the original file
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
    @@ -166,9 +166,9 @@ PREHOOK: Input: default@episodes
      POSTHOOK: query: DESCRIBE episodes
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@episodes
    -title string from deserializer
    -air_date string from deserializer
    -doctor int from deserializer
    +title string episode title
    +air_date string initial date
    +doctor int main actor playing the Doctor in episode
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_joins_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_joins_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_joins_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_joins_native.q.out Wed Apr 1 01:15:50 2015
    @@ -28,9 +28,9 @@ PREHOOK: Input: default@doctors4
      POSTHOOK: query: DESCRIBE doctors4
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors4
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors4
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
    @@ -61,9 +61,9 @@ PREHOOK: Input: default@episodes
      POSTHOOK: query: DESCRIBE episodes
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@episodes
    -title string from deserializer
    -air_date string from deserializer
    -doctor int from deserializer
    +title string episode title
    +air_date string initial date
    +doctor int main actor playing the Doctor in episode
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/episodes.avro' INTO TABLE episodes
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_native.q.out Wed Apr 1 01:15:50 2015
    @@ -26,9 +26,9 @@ PREHOOK: Input: default@doctors
      POSTHOOK: query: DESCRIBE doctors
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int
    +first_name string
    +last_name string
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
      PREHOOK: type: LOAD
      #### A masked pattern was here ####

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned.q.out Wed Apr 1 01:15:50 2015
    @@ -150,27 +150,27 @@ POSTHOOK: Output: default@episodes_parti
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=5
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=6
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=9
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
      PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
      PREHOOK: type: QUERY
      PREHOOK: Input: default@episodes_partitioned
    @@ -360,27 +360,27 @@ POSTHOOK: Output: default@episodes_parti
      POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=5
      POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=6
      POSTHOOK: Output: default@episodes_partitioned_serdeproperties@doctor_pt=9
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned_serdeproperties PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
      PREHOOK: query: -- Evolve the table schema by adding new array field "cast_and_crew"
      ALTER TABLE episodes_partitioned_serdeproperties
      SET SERDE 'org.apache.hadoop.hive.serde2.avro.AvroSerDe'

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned_native.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned_native.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned_native.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_partitioned_native.q.out Wed Apr 1 01:15:50 2015
    @@ -60,27 +60,27 @@ POSTHOOK: Output: default@episodes_parti
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=5
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=6
      POSTHOOK: Output: default@episodes_partitioned@doctor_pt=9
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:from deserializer), ]
    -POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:from deserializer), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=11).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=1).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=2).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=4).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=5).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=6).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).air_date SIMPLE [(episodes)episodes.FieldSchema(name:air_date, type:string, comment:initial date), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).doctor SIMPLE [(episodes)episodes.FieldSchema(name:doctor, type:int, comment:main actor playing the Doctor in episode), ]
    +POSTHOOK: Lineage: episodes_partitioned PARTITION(doctor_pt=9).title SIMPLE [(episodes)episodes.FieldSchema(name:title, type:string, comment:episode title), ]
      PREHOOK: query: SELECT * FROM episodes_partitioned WHERE doctor_pt > 6
      PREHOOK: type: QUERY
      PREHOOK: Input: default@episodes_partitioned

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/avro_sanity_test.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/avro_sanity_test.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/avro_sanity_test.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/avro_sanity_test.q.out Wed Apr 1 01:15:50 2015
    @@ -72,9 +72,9 @@ PREHOOK: Input: default@doctors
      POSTHOOK: query: DESCRIBE doctors
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/groupby_sort_skew_1_23.q.out Wed Apr 1 01:15:50 2015
    @@ -189,7 +189,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -527,7 +527,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -849,7 +849,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -1257,7 +1257,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -1548,9 +1548,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-2 depends on stages: Stage-1
    - Stage-0 depends on stages: Stage-2
    - Stage-3 depends on stages: Stage-0
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
    + Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -1567,17 +1571,39 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: 1 (type: int), _col1 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: int), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: rand() (type: double)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col2 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
    + outputColumnNames: _col0, _col1, _col2
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -1630,90 +1656,122 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: int), KEY._col1 (type: string)
    - mode: partials
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 0
    + /t1 [$hdt$_0:t1]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - properties:
    - columns _col0,_col1,_col2
    - columns.types int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - TotalFiles: 1
    - GatherStats: false
    - MultiFileSpray: false
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3

        Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
          Map Reduce
            Map Operator Tree:
                TableScan
                  GatherStats: false
    - Reduce Output Operator
    - key expressions: _col0 (type: int), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: int), _col1 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col2 (type: bigint)
    - auto parallelism: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
      #### A masked pattern was here ####
                Partition
    - base file name: -mr-10001
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
    - columns _col0,_col1,_col2
    - columns.types int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    - columns _col0,_col1,_col2
    - columns.types int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl3
    + name: default.outputtbl3
            Truncated Path -> Alias:
      #### A masked pattern was here ####
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: int), KEY._col1 (type: string)
    - mode: final
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: _col0 (type: int), UDFToInteger(_col1) (type: int), UDFToInteger(_col2) (type: int)
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -1731,15 +1789,29 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl3
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Path -> Alias:
      #### A masked pattern was here ####
    - table:
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,cnt
    + columns.comments
    + columns.types int:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl3
    + serialization.ddl struct outputtbl3 { i32 key1, i32 key2, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -1755,9 +1827,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl3
    + name: default.outputtbl3
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-3
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl3
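
    [Editor's aside on the plan rewrite above.] The old plan ran two MapReduce jobs: a map-side Group By with mode: hash, a shuffle partitioned on rand() (the skewed-groupby rewrite), a partials pass, then a final pass. The new plan finishes the aggregation in the mapper with mode: final, because the optimizer matches the GROUP BY keys against the table's bucketing/sort columns; what replaces the second job (Stage-7 Conditional Operator plus the Stage-3/Stage-4/Stage-5/Stage-6 move and merge stages) is the standard small-file merge for a map-only job. Below is a minimal sketch of a setup that yields this plan shape, modeled on Hive's groupby_sort tests -- the DDL and settings are assumptions for illustration, not part of this commit:

    -- Hypothetical reproduction, not taken from this diff:
    SET hive.map.aggr = true;
    SET hive.groupby.skewindata = true;   -- source of the old plan's rand()-partitioned two-pass rewrite
    SET hive.map.groupby.sorted = true;   -- lets sorted/bucketed tables aggregate map-side instead
    SET hive.merge.mapfiles = true;       -- source of the Conditional Operator / merge stages

    CREATE TABLE t1 (key STRING, val STRING)
    CLUSTERED BY (key) SORTED BY (key) INTO 2 BUCKETS;
    CREATE TABLE outputTbl3 (key1 INT, key2 INT, cnt INT);

    -- A GROUP BY over a constant plus the table's sort key can be
    -- evaluated per bucket inside the mapper, so the Group By Operator
    -- appears with mode: final and no Reduce Output Operator.
    EXPLAIN EXTENDED
    INSERT OVERWRITE TABLE outputTbl3
    SELECT 1, key, COUNT(1) FROM t1 GROUP BY 1, key;
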
    @@ -1921,7 +1998,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -2207,7 +2284,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -2539,7 +2616,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [$hdt$_0:$hdt$_0:$hdt$_0:t1]
    + /t1 [$hdt$_0:$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -2951,7 +3028,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
    + /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1, null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]

        Stage: Stage-7
          Conditional Operator
    @@ -3377,7 +3454,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:t1]
    + /t1 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:t1]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -3637,7 +3714,7 @@ STAGE PLANS:
                    name: default.t1
                  name: default.t1
            Truncated Path -> Alias:
    - /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:$hdt$_0:t1]
    + /t1 [null-subquery1:$hdt$_0-subquery1:$hdt$_0:t1]
      #### A masked pattern was here ####

        Stage: Stage-8
    @@ -4761,7 +4838,7 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:t2]
    + /t2 [$hdt$_0:t2]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator
    @@ -4973,9 +5050,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-2 depends on stages: Stage-1
    - Stage-0 depends on stages: Stage-2
    - Stage-3 depends on stages: Stage-0
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
    + Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -4992,20 +5073,47 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: rand() (type: double)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    - Path -> Alias:
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
      #### A masked pattern was here ####
    - Path -> Partition:
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
    + Path -> Alias:
    +#### A masked pattern was here ####
    + Path -> Partition:
      #### A masked pattern was here ####
                Partition
                  base file name: t2
    @@ -5055,90 +5163,142 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: partials
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 0
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - TotalFiles: 1
    - GatherStats: false
    - MultiFileSpray: false
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4

        Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
          Map Reduce
            Map Operator Tree:
                TableScan
                  GatherStats: false
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
      #### A masked pattern was here ####
                Partition
    - base file name: -mr-10001
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + name: default.outputtbl4
            Truncated Path -> Alias:
      #### A masked pattern was here ####
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: final
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -5161,15 +5321,34 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl4
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Path -> Alias:
      #### A masked pattern was here ####
    - table:
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -5190,9 +5369,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-3
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
    @@ -5281,9 +5465,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-2 depends on stages: Stage-1
    - Stage-0 depends on stages: Stage-2
    - Stage-3 depends on stages: Stage-0
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
    + Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -5300,17 +5488,39 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string), 2 (type: int)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
    - sort order: ++++
    - Map-reduce partition columns: rand() (type: double)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col4 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -5363,90 +5573,122 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string), KEY._col3 (type: int)
    - mode: partials
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 0
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - properties:
    - columns _col0,_col1,_col2,_col3,_col4
    - columns.types string,int,string,int,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - TotalFiles: 1
    - GatherStats: false
    - MultiFileSpray: false
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5

        Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
          Map Reduce
            Map Operator Tree:
                TableScan
                  GatherStats: false
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
    - sort order: ++++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string), _col3 (type: int)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col4 (type: bigint)
    - auto parallelism: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
      #### A masked pattern was here ####
                Partition
    - base file name: -mr-10001
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
    - columns _col0,_col1,_col2,_col3,_col4
    - columns.types string,int,string,int,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    - columns _col0,_col1,_col2,_col3,_col4
    - columns.types string,int,string,int,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl5
    + name: default.outputtbl5
            Truncated Path -> Alias:
      #### A masked pattern was here ####
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string), KEY._col3 (type: int)
    - mode: final
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), _col3 (type: int), UDFToInteger(_col4) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -5464,15 +5706,29 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl5
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Path -> Alias:
      #### A masked pattern was here ####
    - table:
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + bucket_count -1
    + columns key1,key2,key3,key4,cnt
    + columns.comments
    + columns.types int:int:string:int:int
    +#### A masked pattern was here ####
    + name default.outputtbl5
    + serialization.ddl struct outputtbl5 { i32 key1, i32 key2, string key3, i32 key4, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -5488,9 +5744,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl5
    + name: default.outputtbl5
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-3
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl5
    @@ -5593,9 +5854,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-2 depends on stages: Stage-1
    - Stage-0 depends on stages: Stage-2
    - Stage-3 depends on stages: Stage-0
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
    + Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -5612,17 +5877,44 @@ STAGE PLANS:
                    Group By Operator
                      aggregations: count(1)
                      keys: _col0 (type: string), 1 (type: int), _col2 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: rand() (type: double)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -5675,90 +5967,142 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: partials
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 0
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - TotalFiles: 1
    - GatherStats: false
    - MultiFileSpray: false
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4

        Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
          Map Reduce
            Map Operator Tree:
                TableScan
                  GatherStats: false
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
      #### A masked pattern was here ####
                Partition
    - base file name: -mr-10001
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + name: default.outputtbl4
            Truncated Path -> Alias:
      #### A masked pattern was here ####
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: final
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -5781,15 +6125,34 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl4
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Path -> Alias:
      #### A masked pattern was here ####
    - table:
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -5810,9 +6173,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-3
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4
    @@ -5944,9 +6312,13 @@ TOK_QUERY

      STAGE DEPENDENCIES:
        Stage-1 is a root stage
    - Stage-2 depends on stages: Stage-1
    - Stage-0 depends on stages: Stage-2
    - Stage-3 depends on stages: Stage-0
    + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5
    + Stage-4
    + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6
    + Stage-2 depends on stages: Stage-0
    + Stage-3
    + Stage-5
    + Stage-6 depends on stages: Stage-5

      STAGE PLANS:
        Stage: Stage-1
    @@ -5962,19 +6334,45 @@ STAGE PLANS:
                    Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
                    Group By Operator
                      aggregations: count(1)
    - bucketGroup: true
                      keys: _col0 (type: string), 2 (type: int), _col2 (type: string)
    - mode: hash
    + mode: final
                      outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: rand() (type: double)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    + outputColumnNames: _col0, _col1, _col2, _col3
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + GlobalTableId: 1
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: true
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
    @@ -6027,90 +6425,142 @@ STAGE PLANS:
                    name: default.t2
                  name: default.t2
            Truncated Path -> Alias:
    - /t2 [$hdt$_0:$hdt$_0:$hdt$_0:t2]
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: partials
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - GlobalTableId: 0
    + /t2 [$hdt$_0:t2]
    +
    + Stage: Stage-7
    + Conditional Operator
    +
    + Stage: Stage-4
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####
    - NumFilesPerFileSink: 1
    - table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - TotalFiles: 1
    - GatherStats: false
    - MultiFileSpray: false
    +
    + Stage: Stage-0
    + Move Operator
    + tables:
    + replace: true
    +#### A masked pattern was here ####
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4

        Stage: Stage-2
    + Stats-Aggr Operator
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
          Map Reduce
            Map Operator Tree:
                TableScan
                  GatherStats: false
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: int), _col2 (type: string)
    - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: NONE
    - tag: -1
    - value expressions: _col3 (type: bigint)
    - auto parallelism: false
    + File Output Operator
    + compressed: false
    + GlobalTableId: 0
    +#### A masked pattern was here ####
    + NumFilesPerFileSink: 1
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + TotalFiles: 1
    + GatherStats: false
    + MultiFileSpray: false
            Path -> Alias:
      #### A masked pattern was here ####
            Path -> Partition:
      #### A masked pattern was here ####
                Partition
    - base file name: -mr-10001
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                  properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    - columns _col0,_col1,_col2,_col3
    - columns.types string,int,string,bigint
    - escape.delim \
    - serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.outputtbl4
    + name: default.outputtbl4
            Truncated Path -> Alias:
      #### A masked pattern was here ####
    - Needs Tagging: false
    - Reduce Operator Tree:
    - Group By Operator
    - aggregations: count(VALUE._col0)
    - keys: KEY._col0 (type: string), KEY._col1 (type: int), KEY._col2 (type: string)
    - mode: final
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: UDFToInteger(_col0) (type: int), _col1 (type: int), _col2 (type: string), UDFToInteger(_col3) (type: int)
    - outputColumnNames: _col0, _col1, _col2, _col3
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    +
    + Stage: Stage-5
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + GatherStats: false
                  File Output Operator
                    compressed: false
    - GlobalTableId: 1
    + GlobalTableId: 0
      #### A masked pattern was here ####
                    NumFilesPerFileSink: 1
    - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: NONE
    -#### A masked pattern was here ####
                    table:
                        input format: org.apache.hadoop.mapred.TextInputFormat
                        output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    @@ -6133,15 +6583,34 @@ STAGE PLANS:
                        serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                        name: default.outputtbl4
                    TotalFiles: 1
    - GatherStats: true
    + GatherStats: false
                    MultiFileSpray: false
    -
    - Stage: Stage-0
    - Move Operator
    - tables:
    - replace: true
    + Path -> Alias:
      #### A masked pattern was here ####
    - table:
    + Path -> Partition:
    +#### A masked pattern was here ####
    + Partition
    + base file name: -ext-10001
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + properties:
    + COLUMN_STATS_ACCURATE true
    + bucket_count -1
    + columns key1,key2,key3,cnt
    + columns.comments
    + columns.types int:int:string:int
    +#### A masked pattern was here ####
    + name default.outputtbl4
    + numFiles 1
    + numRows 6
    + rawDataSize 48
    + serialization.ddl struct outputtbl4 { i32 key1, i32 key2, string key3, i32 cnt}
    + serialization.format 1
    + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + totalSize 54
    +#### A masked pattern was here ####
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
                    input format: org.apache.hadoop.mapred.TextInputFormat
                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                    properties:
    @@ -6162,9 +6631,14 @@ STAGE PLANS:
      #### A masked pattern was here ####
                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                    name: default.outputtbl4
    + name: default.outputtbl4
    + Truncated Path -> Alias:
    +#### A masked pattern was here ####

    - Stage: Stage-3
    - Stats-Aggr Operator
    + Stage: Stage-6
    + Move Operator
    + files:
    + hdfs directory: true
      #### A masked pattern was here ####

      PREHOOK: query: INSERT OVERWRITE TABLE outputTbl4

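The net effect in this groupby plan: the reduce-side Group By Operator (mode: final) and its Reduce Output Operator are gone, and the map-side tree now writes straight into default.outputtbl4 using the table's own TextInputFormat/LazySimpleSerDe layout and stats (numRows 6, rawDataSize 48, totalSize 54), instead of staging through a SequenceFile/LazyBinarySerDe scratch directory (base file name -mr-10001 becomes -ext-10001). The new Stage-5/Stage-6 pair is the usual conditional merge-and-move tail for such map-only writes. A plausible shape for the statement involved (an illustrative sketch; only the INSERT OVERWRITE prefix is visible above):

    -- group-by with a constant key, writing into
    -- outputTbl4(key1 int, key2 int, key3 string, cnt int)
    INSERT OVERWRITE TABLE outputTbl4
    SELECT key, 1, val, count(1)
    FROM t1
    GROUP BY key, 1, val;
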
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/index_serde.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/index_serde.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/index_serde.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/index_serde.q.out Wed Apr 1 01:15:50 2015
    @@ -72,9 +72,9 @@ PREHOOK: Input: default@doctors
      POSTHOOK: query: DESCRIBE doctors
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@doctors
    -number int from deserializer
    -first_name string from deserializer
    -last_name string from deserializer
    +number int Order of playing the role
    +first_name string first name of actor playing role
    +last_name string last name of actor playing role
      PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/doctors.avro' INTO TABLE doctors
      PREHOOK: type: LOAD
      #### A masked pattern was here ####
    @@ -98,7 +98,7 @@ PREHOOK: Input: default@default__doctors
      POSTHOOK: query: DESCRIBE EXTENDED default__doctors_doctors_index__
      POSTHOOK: type: DESCTABLE
      POSTHOOK: Input: default@default__doctors_doctors_index__
    -number int from deserializer
    +number int Order of playing the role
      _bucketname string
      _offsets array<bigint>

    @@ -113,7 +113,7 @@ POSTHOOK: Input: default@doctors
      POSTHOOK: Output: default@default__doctors_doctors_index__
      POSTHOOK: Lineage: default__doctors_doctors_index__._bucketname SIMPLE [(doctors)doctors.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ]
      POSTHOOK: Lineage: default__doctors_doctors_index__._offsets EXPRESSION [(doctors)doctors.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), ]
    -POSTHOOK: Lineage: default__doctors_doctors_index__.number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:from deserializer), ]
    +POSTHOOK: Lineage: default__doctors_doctors_index__.number SIMPLE [(doctors)doctors.FieldSchema(name:number, type:int, comment:Order of playing the role), ]
      PREHOOK: query: EXPLAIN SELECT * FROM doctors WHERE number > 6
      PREHOOK: type: QUERY
      POSTHOOK: query: EXPLAIN SELECT * FROM doctors WHERE number > 6

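The index_serde change is about metadata, not plan shape: DESCRIBE now surfaces the per-field doc strings from the table's Avro schema as column comments instead of the generic "from deserializer" placeholder, and the comment propagates through the index table's lineage. In an Avro record, the field-level "doc" attribute is what becomes the Hive column comment; a minimal sketch of such a table definition (not the test's actual DDL, but using the doc strings shown above):

    -- the per-field "doc" strings in the embedded Avro schema become
    -- the column comments that DESCRIBE doctors now reports
    CREATE TABLE doctors
    STORED AS AVRO
    TBLPROPERTIES ('avro.schema.literal'='{
      "type": "record", "name": "doctors",
      "fields": [
        {"name": "number",     "type": "int",    "doc": "Order of playing the role"},
        {"name": "first_name", "type": "string", "doc": "first name of actor playing role"},
        {"name": "last_name",  "type": "string", "doc": "last name of actor playing role"}
      ]}');
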
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/input_part1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/input_part1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/input_part1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/input_part1.q.out Wed Apr 1 01:15:50 2015
    @@ -172,7 +172,7 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=12 [$hdt$_0:srcpart]
    + /srcpart/ds=2008-04-08/hr=12 [srcpart]

        Stage: Stage-7
          Conditional Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join28.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join28.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join28.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join28.q.out Wed Apr 1 01:15:50 2015
    @@ -40,14 +40,14 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:z
    + $hdt$_0:z
                TableScan
                  alias: z
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -62,7 +62,7 @@ STAGE PLANS:
                        keys:
                          0 _col0 (type: string)
                          1 _col0 (type: string)
    - $hdt$_0:$hdt$_1:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE

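Here and in the join29/join31/join32 diffs that follow, the only change is the internal alias of the map-join's small-table side: one level of the generated $hdt$_n: prefix disappears (e.g. $hdt$_0:$hdt$_0:z becomes $hdt$_0:z). These prefixes record how deeply a table reference was wrapped in generated Select operators during plan generation, so a shorter prefix is consistent with the optimizer emitting one fewer wrapper. A query of roughly the join28 shape (an illustrative sketch, not the exact test query):

    -- map join of a derived table against a partitioned table;
    -- z and x become the small, hash-loaded sides in the plan above
    SELECT subq.key1, z.value
    FROM (SELECT x.key AS key1, y.value AS value2
          FROM src1 x JOIN src y ON x.key = y.key) subq
    JOIN srcpart z ON subq.key1 = z.key;
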
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join29.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join29.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join29.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join29.q.out Wed Apr 1 01:15:50 2015
    @@ -84,11 +84,11 @@ STAGE PLANS:
        Stage: Stage-8
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$INTNAME1
    + $INTNAME1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$INTNAME1
    + $INTNAME1
                TableScan
                  HashTable Sink Operator
                    keys:
    @@ -135,11 +135,11 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$INTNAME
    + $INTNAME
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$INTNAME
    + $INTNAME
                TableScan
                  HashTable Sink Operator
                    keys:

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join31.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join31.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join31.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join31.q.out Wed Apr 1 01:15:50 2015
    @@ -84,11 +84,11 @@ STAGE PLANS:
        Stage: Stage-9
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$INTNAME1
    + $hdt$_0:$INTNAME1
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$INTNAME1
    + $hdt$_0:$INTNAME1
                TableScan
                  HashTable Sink Operator
                    keys:
    @@ -169,11 +169,11 @@ STAGE PLANS:
        Stage: Stage-10
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:$INTNAME
    + $hdt$_0:$INTNAME
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:$INTNAME
    + $hdt$_0:$INTNAME
                TableScan
                  HashTable Sink Operator
                    keys:

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/join32.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/join32.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/join32.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/join32.q.out Wed Apr 1 01:15:50 2015
    @@ -109,14 +109,14 @@ STAGE PLANS:
        Stage: Stage-7
          Map Reduce Local Work
            Alias -> Map Local Tables:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                Fetch Operator
                  limit: -1
    - $hdt$_0:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                Fetch Operator
                  limit: -1
            Alias -> Map Local Operator Tree:
    - $hdt$_0:$hdt$_0:y
    + $hdt$_0:y
                TableScan
                  alias: y
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    @@ -134,7 +134,7 @@ STAGE PLANS:
                          0 _col0 (type: string)
                          1 _col3 (type: string)
                        Position of Big Table: 1
    - $hdt$_0:$hdt$_1:$hdt$_2:x
    + $hdt$_1:$hdt$_2:x
                TableScan
                  alias: x
                  Statistics: Num rows: 25 Data size: 191 Basic stats: COMPLETE Column stats: NONE
    @@ -356,7 +356,7 @@ STAGE PLANS:
                    name: default.srcpart
                  name: default.srcpart
            Truncated Path -> Alias:
    - /srcpart/ds=2008-04-08/hr=11 [$hdt$_0:$hdt$_1:$hdt$_1:z]
    + /srcpart/ds=2008-04-08/hr=11 [$hdt$_1:$hdt$_1:z]

        Stage: Stage-0
          Move Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/subquery_in.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/subquery_in.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/subquery_in.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/subquery_in.q.out Wed Apr 1 01:15:50 2015
    @@ -254,36 +254,32 @@ STAGE PLANS:
                TableScan
                  alias: part
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -293,7 +289,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col1 (type: int)
    + expressions: _col5 (type: int)
                      outputColumnNames: _col0
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
    @@ -438,46 +434,42 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                              isPivotResult: true
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
    + predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
                    Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: int)
    + expressions: _col2 (type: string), _col5 (type: int)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator

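Two related changes are visible in these windowing plans. First, the identity Select Operator between the TableScan and the Reduce Output Operator is gone; the reduce sink now keys directly on p_mfgr/p_size. Second, the reduce-side columns keep the table's internal positions instead of being renumbered from zero: in part, p_mfgr is column 2 and p_size is column 5, which is why _col0/_col1 become _col2/_col5 throughout. A sketch of the query shape these plans come from (illustrative; the actual statements live in subquery_in.q):

    -- IN-subquery over a ranked window: keep the two smallest sizes
    -- per manufacturer, then probe the outer table against them
    SELECT p_name, p_size
    FROM part
    WHERE part.p_size IN
      (SELECT MIN(p_size)
       FROM (SELECT p_mfgr, p_size,
                    rank() OVER (PARTITION BY p_mfgr ORDER BY p_size) AS r
             FROM part) a
       WHERE r <= 2);
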
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/subquery_in_having.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/subquery_in_having.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/subquery_in_having.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/subquery_in_having.q.out Wed Apr 1 01:15:50 2015
    @@ -1332,37 +1332,33 @@ STAGE PLANS:
                TableScan
                  alias: part_subq
                  Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col1 (type: string), _col2 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col1 (type: string)
    - Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: string)
    + value expressions: p_name (type: string)
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    + outputColumnNames: _col1, _col2, _col5
                Statistics: Num rows: 15 Data size: 3173 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: string, _col2: int
    + output shape: _col1: string, _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col2
    - partition by: _col1
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col0
    + arguments: _col1
                              name: first_value
                              window function: GenericUDAFFirstValueEvaluator
                              window frame: PRECEDING(MAX)~

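The same pattern, with a value column this time: p_name now ships through the reduce sink as a raw column (value expressions: p_name) rather than a pre-projected _col0, and on the reduce side it surfaces as VALUE._col1/_col1, again matching its position in the table. The window function here is first_value rather than rank; a minimal sketch of that construct (assuming the part_subq layout shown above):

    -- first_value over a per-manufacturer window ordered by size,
    -- the window aggregate used by the HAVING subquery above
    SELECT p_mfgr,
           first_value(p_name) OVER (PARTITION BY p_mfgr ORDER BY p_size) AS first_name
    FROM part_subq;
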
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/subquery_notin.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/subquery_notin.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/subquery_notin.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/subquery_notin.q.out Wed Apr 1 01:15:50 2015
    @@ -285,7 +285,7 @@ POSTHOOK: Input: default@src
      199 val_199
      199 val_199
      2 val_2
    -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: -- non agg, corr
      explain
      select p_mfgr, b.p_name, p_size
    @@ -321,44 +321,40 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col1 (type: string), _col2 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col1 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: string)
    + value expressions: p_name (type: string)
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    + outputColumnNames: _col1, _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: string, _col2: int
    + output shape: _col1: string, _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col2
    - partition by: _col1
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col2
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                              isPivotResult: true
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((_wcol0 <= 2) and (_col0 is null or _col1 is null)) (type: boolean)
    + predicate: ((_wcol0 <= 2) and (_col1 is null or _col2 is null)) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
    @@ -480,37 +476,33 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col1 (type: string), _col2 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col1 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: string)
    + value expressions: p_name (type: string)
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    + outputColumnNames: _col1, _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: string, _col2: int
    + output shape: _col1: string, _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col2
    - partition by: _col1
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col2
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -520,7 +512,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: string)
    + expressions: _col1 (type: string), _col2 (type: string)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
    @@ -536,7 +528,7 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: select p_mfgr, b.p_name, p_size
      from part b
      where b.p_name not in
    @@ -575,7 +567,7 @@ Manufacturer#4 almond azure aquamarine p
      Manufacturer#5 almond antique blue firebrick mint 31
      Manufacturer#5 almond aquamarine dodger light gainsboro 46
      Manufacturer#5 almond azure blanched chiffon midnight 23
    -Warning: Shuffle Join JOIN[45][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[43][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
      PREHOOK: query: -- agg, non corr
      explain
      select p_name, p_size
    @@ -612,36 +604,32 @@ STAGE PLANS:
                TableScan
                  alias: part
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -651,7 +639,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col1 (type: int)
    + expressions: _col5 (type: int)
                      outputColumnNames: _col0
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
    @@ -763,36 +751,32 @@ STAGE PLANS:
                TableScan
                  alias: part
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -802,7 +786,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col1 (type: int)
    + expressions: _col5 (type: int)
                      outputColumnNames: _col0
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
    @@ -859,7 +843,7 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -Warning: Shuffle Join JOIN[45][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[43][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Stage-2:MAPRED' is a cross product
      PREHOOK: query: select p_name, p_size
      from
      part where part.p_size not in
    @@ -906,7 +890,7 @@ almond aquamarine sandy cyan gainsboro 1
      almond aquamarine yellow dodger mint 7
      almond azure aquamarine papaya violet 12
      almond azure blanched chiffon midnight 23
    -Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[40][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: -- agg, corr
      explain
      select p_mfgr, p_name, p_size
    @@ -942,36 +926,32 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -981,7 +961,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: int)
    + expressions: _col2 (type: string), _col5 (type: int)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
    @@ -1137,36 +1117,32 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -1176,7 +1152,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: int)
    + expressions: _col2 (type: string), _col5 (type: int)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
    @@ -1226,7 +1202,7 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -Warning: Shuffle Join JOIN[42][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[40][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: select p_mfgr, p_name, p_size
      from part b where b.p_size not in
        (select min(p_size)
    @@ -1267,7 +1243,7 @@ Manufacturer#5 almond antique medium spr
      Manufacturer#5 almond azure blanched chiffon midnight 23
      Manufacturer#5 almond antique blue firebrick mint 31
      Manufacturer#5 almond aquamarine dodger light gainsboro 46
    -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: -- non agg, non corr, Group By in Parent Query
      select li.l_partkey, count(*)
      from lineitem li
    @@ -1466,7 +1442,7 @@ POSTHOOK: Input: default@src
      POSTHOOK: Input: default@t1_v
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@T2_v
    -Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: explain
      select *
      from T1_v where T1_v.key not in (select T2_v.key from T2_v)
    @@ -1611,7 +1587,7 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -Warning: Shuffle Join JOIN[22][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: select *
      from T1_v where T1_v.key not in (select T2_v.key from T2_v)
      PREHOOK: type: QUERY

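Beyond the plan rewrites, every "cross product" warning changes its operator id (JOIN[32] becomes JOIN[29], JOIN[45] becomes JOIN[43], and so on). Operator ids are assigned sequentially as the plan is built, so dropping the identity Select operators shifts the ids of everything generated after them; the warnings are unchanged in substance. The NOT IN queries that trigger them are partially visible above; completing one for context (a hedged reconstruction, not a verbatim copy of the .q file):

    -- NOT IN with a correlated aggregate subquery; Hive's rewrite adds a
    -- null-check branch joined without keys, hence the cross-product warning
    SELECT p_mfgr, p_name, p_size
    FROM part b
    WHERE b.p_size NOT IN
      (SELECT MIN(p_size)
       FROM (SELECT p_mfgr, p_size,
                    rank() OVER (PARTITION BY p_mfgr ORDER BY p_size) AS r
             FROM part) a
       WHERE r <= 2 AND b.p_mfgr = a.p_mfgr);
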
    Modified: hive/branches/llap/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out Wed Apr 1 01:15:50 2015
    @@ -207,46 +207,42 @@ STAGE PLANS:
                TableScan
                  alias: part2
                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
    - Select Operator
    - expressions: p2_mfgr (type: string), p2_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p2_mfgr (type: string), p2_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p2_mfgr (type: string)
                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                              isPivotResult: true
                  Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                  Filter Operator
    - predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
    + predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
                    Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: int)
    + expressions: _col2 (type: string), _col5 (type: int)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE
                      Group By Operator
    @@ -379,46 +375,42 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                              isPivotResult: true
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
    + predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
                    Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: int)
    + expressions: _col2 (type: string), _col5 (type: int)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
    @@ -789,7 +781,7 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -Warning: Shuffle Join JOIN[32][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[29][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
      PREHOOK: query: -- non agg, corr
      explain
      select p_mfgr, b.p_name, p_size
    @@ -825,44 +817,40 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col1 (type: string), _col2 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col1 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: string)
    + value expressions: p_name (type: string)
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    + outputColumnNames: _col1, _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: string, _col2: int
    + output shape: _col1: string, _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col2
    - partition by: _col1
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col2
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                              isPivotResult: true
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((_wcol0 <= 2) and (_col0 is null or _col1 is null)) (type: boolean)
    + predicate: ((_wcol0 <= 2) and (_col1 is null or _col2 is null)) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
    @@ -984,37 +972,33 @@ STAGE PLANS:
                TableScan
                  alias: b
                  Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_name (type: string), p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col1 (type: string), _col2 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col1 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: string)
    + value expressions: p_name (type: string)
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    + expressions: VALUE._col1 (type: string), KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    + outputColumnNames: _col1, _col2, _col5
                Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition
                        input alias: ptf_0
    - output shape: _col0: string, _col1: string, _col2: int
    + output shape: _col1: string, _col2: string, _col5: int
                        type: WINDOWING
                      Windowing table definition
                        input alias: ptf_1
                        name: windowingtablefunction
    - order by: _col2
    - partition by: _col1
    + order by: _col5
    + partition by: _col2
                        raw input shape:
                        window functions:
                            window function definition
                              alias: _wcol0
    - arguments: _col2
    + arguments: _col5
                              name: rank
                              window function: GenericUDAFRankEvaluator
                              window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -1024,7 +1008,7 @@ STAGE PLANS:
                    predicate: (_wcol0 <= 2) (type: boolean)
                    Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: _col0 (type: string), _col1 (type: string)
    + expressions: _col1 (type: string), _col2 (type: string)
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/subquery_views.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/subquery_views.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/subquery_views.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/subquery_views.q.out Wed Apr 1 01:15:50 2015
    @@ -69,8 +69,8 @@ POSTHOOK: type: CREATEVIEW
      POSTHOOK: Input: default@src
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@cv2
    -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
      PREHOOK: query: explain
      select *
      from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
    @@ -378,8 +378,8 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -Warning: Shuffle Join JOIN[21][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    -Warning: Shuffle Join JOIN[50][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[19][tables = [$hdt$_0, $hdt$_1]] in Stage 'Stage-1:MAPRED' is a cross product
    +Warning: Shuffle Join JOIN[46][tables = [$hdt$_1, $hdt$_2]] in Stage 'Stage-6:MAPRED' is a cross product
      PREHOOK: query: select *
      from cv2 where cv2.key in (select key from cv2 c where c.key < '11')
      PREHOOK: type: QUERY
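
    Only the operator ids embedded in the cross-product warnings change here
    (JOIN[21] -> JOIN[19], JOIN[50] -> JOIN[46]); the joins themselves are untouched,
    and the smaller ids simply reflect a shorter operator pipeline once the redundant
    projections are gone. The warnings are emitted for the view query shown in the
    hunk:

    SELECT *
    FROM cv2 WHERE cv2.key IN (SELECT key FROM cv2 c WHERE c.key < '11');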

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/table_access_keys_stats.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/table_access_keys_stats.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/table_access_keys_stats.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/table_access_keys_stats.q.out Wed Apr 1 01:15:50 2015
    @@ -22,7 +22,7 @@ SELECT key, count(1) FROM T1 GROUP BY ke
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key

    @@ -35,7 +35,7 @@ PREHOOK: query: SELECT key, val, count(1
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key,val

    @@ -50,7 +50,7 @@ SELECT key, count(1) FROM (SELECT key, v
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key

    @@ -63,7 +63,7 @@ PREHOOK: query: SELECT k, count(1) FROM
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key

    @@ -77,7 +77,7 @@ SELECT 1, key, count(1) FROM T1 GROUP BY
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key

    @@ -90,7 +90,7 @@ PREHOOK: query: SELECT key, 1, val, coun
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key,val

    @@ -104,7 +104,7 @@ PREHOOK: query: SELECT key, 1, val, 2, c
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key,val

    @@ -130,7 +130,7 @@ group by key + key
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key

    @@ -148,11 +148,11 @@ SELECT key, count(1) as c FROM T1 GROUP
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_4
    +Operator:GBY_3
      Table:default@t1
      Keys:key

    -Operator:GBY_12
    +Operator:GBY_10
      Table:default@t1
      Keys:key

    @@ -218,7 +218,7 @@ group by key, constant, val
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_5
    +Operator:GBY_3
      Table:default@t1
      Keys:key,val

    @@ -242,7 +242,7 @@ GROUP BY key, constant3, val
      PREHOOK: type: QUERY
      PREHOOK: Input: default@t1
      #### A masked pattern was here ####
    -Operator:GBY_5
    +Operator:GBY_3
      Table:default@t1
      Keys:key,val
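
    Every hunk in this file is the same one-token change: the Group By operator's id
    drops (GBY_4 -> GBY_3, GBY_5 -> GBY_3, GBY_12 -> GBY_10) because one projection
    operator was removed upstream of it; the recorded table-access keys themselves are
    unchanged. The pattern covers simple aggregations like the one in the first hunk:

    SELECT key, count(1) FROM T1 GROUP BY key;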


    Modified: hive/branches/llap/ql/src/test/results/clientpositive/tez/subquery_in.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/tez/subquery_in.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/tez/subquery_in.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/tez/subquery_in.q.out Wed Apr 1 01:15:50 2015
    @@ -293,15 +293,11 @@ STAGE PLANS:
                      TableScan
                        alias: part
                        Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                          Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              Reducer 2
                  Reduce Operator Tree:
                    Merge Join Operator
    @@ -323,24 +319,24 @@ STAGE PLANS:
                  Reduce Operator Tree:
                    Select Operator
                      expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                      Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                      PTF Operator
                        Function definitions:
                            Input definition
                              input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                              type: WINDOWING
                            Windowing table definition
                              input alias: ptf_1
                              name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                              raw input shape:
                              window functions:
                                  window function definition
                                    alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                                    name: rank
                                    window function: GenericUDAFRankEvaluator
                                    window frame: PRECEDING(MAX)~FOLLOWING(MAX)
    @@ -350,7 +346,7 @@ STAGE PLANS:
                          predicate: (_wcol0 <= 2) (type: boolean)
                          Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
    - expressions: _col1 (type: int)
    + expressions: _col5 (type: int)
                            outputColumnNames: _col0
                            Statistics: Num rows: 8 Data size: 968 Basic stats: COMPLETE Column stats: NONE
                            Group By Operator
    @@ -465,15 +461,11 @@ STAGE PLANS:
                      TableScan
                        alias: b
                        Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: p_mfgr (type: string), p_size (type: int)
    - outputColumnNames: _col0, _col1
    + Reduce Output Operator
    + key expressions: p_mfgr (type: string), p_size (type: int)
    + sort order: ++
    + Map-reduce partition columns: p_mfgr (type: string)
                          Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: int)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              Reducer 2
                  Reduce Operator Tree:
                    Merge Join Operator
    @@ -499,34 +491,34 @@ STAGE PLANS:
                  Reduce Operator Tree:
                    Select Operator
                      expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: int)
    - outputColumnNames: _col0, _col1
    + outputColumnNames: _col2, _col5
                      Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                      PTF Operator
                        Function definitions:
                            Input definition
                              input alias: ptf_0
    - output shape: _col0: string, _col1: int
    + output shape: _col2: string, _col5: int
                              type: WINDOWING
                            Windowing table definition
                              input alias: ptf_1
                              name: windowingtablefunction
    - order by: _col1
    - partition by: _col0
    + order by: _col5
    + partition by: _col2
                              raw input shape:
                              window functions:
                                  window function definition
                                    alias: _wcol0
    - arguments: _col1
    + arguments: _col5
                                    name: rank
                                    window function: GenericUDAFRankEvaluator
                                    window frame: PRECEDING(MAX)~FOLLOWING(MAX)
                                    isPivotResult: true
                        Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((_wcol0 <= 2) and _col0 is not null) (type: boolean)
    + predicate: ((_wcol0 <= 2) and _col2 is not null) (type: boolean)
                          Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
    - expressions: _col0 (type: string), _col1 (type: int)
    + expressions: _col2 (type: string), _col5 (type: int)
                            outputColumnNames: _col0, _col1
                            Statistics: Num rows: 4 Data size: 484 Basic stats: COMPLETE Column stats: NONE
                            Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_expressions.q.out Wed Apr 1 01:15:50 2015
    @@ -23,9 +23,9 @@ STAGE PLANS:
              TableScan
                alias: decimal_test
                Filter Operator
    - predicate: (((((cdecimal1 > CAST( 0 AS decimal(20,10))) and (cdecimal1 < CAST( 12345.5678 AS decimal(25,15)))) and (cdecimal2 <> CAST( 0 AS decimal(24,14)))) and (cdecimal2 > CAST( 1000 AS decimal(24,14)))) and cdouble is not null) (type: boolean)
    + predicate: (((((cdecimal1 > 0) and (cdecimal1 < 12345.5678)) and (cdecimal2 <> 0)) and (cdecimal2 > 1000)) and cdouble is not null) (type: boolean)
                  Select Operator
    - expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (CAST( 2 AS decimal(10,0)) * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % CAST( 10 AS decimal(10,0))) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)
    + expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)
                    outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
                    Limit
                      Number of rows: 10

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf.q.out Wed Apr 1 01:15:50 2015
    @@ -820,7 +820,7 @@ STAGE PLANS:
                        alias: decimal_udf
                        Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key * CAST( value AS decimal(10,0))) > CAST( 0 AS decimal(31,10))) (type: boolean)
    + predicate: ((key * CAST( value AS decimal(10,0))) > 0) (type: boolean)
                          Statistics: Num rows: 12 Data size: 1356 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: key (type: decimal(20,10)), value (type: int)
    @@ -1142,7 +1142,7 @@ STAGE PLANS:
                        alias: decimal_udf
                        Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                        Select Operator
    - expressions: (key / CAST( 0 AS decimal(10,0))) (type: decimal(22,12))
    + expressions: (key / 0) (type: decimal(22,12))
                          outputColumnNames: _col0
                          Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                          Limit
    @@ -1240,7 +1240,7 @@ STAGE PLANS:
                        alias: decimal_udf
                        Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (key <> CAST( 0 AS decimal(20,10))) (type: boolean)
    + predicate: (key <> 0) (type: boolean)
                          Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: (key / key) (type: decimal(38,24))
    @@ -2230,7 +2230,7 @@ STAGE PLANS:
                        alias: decimal_udf
                        Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                        Select Operator
    - expressions: ((key + CAST( 1 AS decimal(10,0))) % (key / CAST( 2 AS decimal(10,0)))) (type: decimal(22,12))
    + expressions: ((key + 1) % (key / 2)) (type: decimal(22,12))
                          outputColumnNames: _col0
                          Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                          File Output Operator
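
    These four hunks show constant decimal literals printed bare instead of wrapped in
    CAST( ... AS decimal(p,s)); the result types (decimal(22,12), decimal(38,24)) are
    unchanged, so this is a plan simplification rather than a semantic change. The
    affected expressions come from queries of roughly this shape, a sketch against the
    test's decimal_udf table:

    EXPLAIN SELECT key / key FROM decimal_udf WHERE key <> 0;
    EXPLAIN SELECT (key + 1) % (key / 2) FROM decimal_udf;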

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/tez/vector_decimal_udf2.q.out Wed Apr 1 01:15:50 2015
    @@ -71,10 +71,10 @@ STAGE PLANS:
                        alias: decimal_udf2
                        Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (key = CAST( 10 AS decimal(20,10))) (type: boolean)
    + predicate: (key = 10) (type: boolean)
                          Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
    - expressions: acos(key) (type: double), asin(key) (type: double), atan(key) (type: double), cos(key) (type: double), sin(key) (type: double), tan(key) (type: double), radians(key) (type: double)
    + expressions: NaN (type: double), NaN (type: double), 1.4711276743037347 (type: double), -0.8390715290764524 (type: double), -0.5440211108893698 (type: double), 0.6483608274590866 (type: double), 0.17453292519943295 (type: double)
                            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
                            Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                            File Output Operator
    @@ -132,10 +132,10 @@ STAGE PLANS:
                        alias: decimal_udf2
                        Statistics: Num rows: 38 Data size: 4296 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (key = CAST( 10 AS decimal(20,10))) (type: boolean)
    + predicate: (key = 10) (type: boolean)
                          Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
    - expressions: exp(key) (type: double), ln(key) (type: double), log(key) (type: double), log(key, key) (type: double), log(key, value) (type: double), log(value, key) (type: double), log10(key) (type: double), sqrt(key) (type: double)
    + expressions: 22026.465794806718 (type: double), 2.302585092994046 (type: double), 2.302585092994046 (type: double), 1.0 (type: double), log(10, value) (type: double), log(value, 10) (type: double), 1.0 (type: double), 3.1622776601683795 (type: double)
                            outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7
                            Statistics: Num rows: 19 Data size: 2148 Basic stats: COMPLETE Column stats: NONE
                            File Output Operator
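
    Beyond the simplified key = 10 predicate, the deterministic math UDFs are now
    folded at compile time: with key pinned to 10, acos(key) and asin(key) become NaN
    literals (10 lies outside the [-1, 1] domain), while atan, cos, sin, tan, and
    radians fold to their numeric results. In the second hunk only log(key, value) and
    log(value, key) survive as expressions, since value is not constant. The
    corresponding query, as in the test:

    EXPLAIN
    SELECT acos(key), asin(key), atan(key), cos(key), sin(key), tan(key), radians(key)
    FROM decimal_udf2
    WHERE key = 10;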

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/udaf_percentile_approx_23.q.out Wed Apr 1 01:15:50 2015
    @@ -505,11 +505,11 @@ POSTHOOK: Input: default@t12
      #### A masked pattern was here ####
      [26.0,255.5,479.0,491.0]
      PREHOOK: query: -- NaN
    -explain
    +explain
      select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket
      PREHOOK: type: QUERY
      POSTHOOK: query: -- NaN
    -explain
    +explain
      select percentile_approx(case when key < 100 then cast('NaN' as double) else key end, 0.5) from bucket
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
    @@ -566,11 +566,11 @@ POSTHOOK: Input: default@bucket
      #### A masked pattern was here ####
      341.5
      PREHOOK: query: -- with CBO
    -explain
    +explain
      select percentile_approx(key, 0.5) from bucket
      PREHOOK: type: QUERY
      POSTHOOK: query: -- with CBO
    -explain
    +explain
      select percentile_approx(key, 0.5) from bucket
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
    @@ -617,12 +617,12 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    -PREHOOK: query: select percentile_approx(key, 0.5) from bucket
    +PREHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket
      PREHOOK: type: QUERY
      PREHOOK: Input: default@bucket
      #### A masked pattern was here ####
    -POSTHOOK: query: select percentile_approx(key, 0.5) from bucket
    +POSTHOOK: query: select percentile_approx(key, 0.5) between 255.0 and 257.0 from bucket
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@bucket
      #### A masked pattern was here ####
    -255.5
    +true
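
    The substantive change in this file is the final hunk: the exact-value assertion
    (255.5) is replaced by a range check, which keeps the test stable if
    percentile_approx returns slightly different estimates across runs. The explain
    hunks above it differ only in invisible trailing whitespace after the explain
    keyword. The new assertion:

    SELECT percentile_approx(key, 0.5) BETWEEN 255.0 AND 257.0 FROM bucket;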

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/udf7.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/udf7.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/udf7.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/udf7.q.out Wed Apr 1 01:15:50 2015
    @@ -36,33 +36,21 @@ SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1
             POW(CAST (2 AS DECIMAL), CAST(3 AS INT)) FROM dest1
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
    - Stage-1 is a root stage
    - Stage-0 depends on stages: Stage-1
    + Stage-0 is a root stage

      STAGE PLANS:
    - Stage: Stage-1
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - alias: dest1
    - Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
    - Select Operator
    - expressions: 1.098612288668 (type: double), null (type: void), null (type: void), 1.098612288668 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), 0.47712125472 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), null (type: void), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), power(CAST( 1 AS decimal(10,0)), 0) (type: double), power(CAST( 2 AS decimal(10,0)), 3) (type: double), power(CAST( 2 AS decimal(10,0)), 3) (type: double)
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27
    - Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
        Stage: Stage-0
          Fetch Operator
            limit: -1
            Processor Tree:
    - ListSink
    + TableScan
    + alias: dest1
    + Statistics: Num rows: 1 Data size: 7 Basic stats: COMPLETE Column stats: COMPLETE
    + Select Operator
    + expressions: 1.098612288668 (type: double), null (type: void), null (type: void), 1.098612288668 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), 0.47712125472 (type: double), null (type: void), null (type: void), 1.584962500721 (type: double), null (type: void), null (type: void), null (type: void), -1.0 (type: double), 7.389056098931 (type: double), 8.0 (type: double), 8.0 (type: double), 0.125 (type: double), 8.0 (type: double), 2.0 (type: double), NaN (type: double), 1.0 (type: double), 1.0 (type: double), 8.0 (type: double), 8.0 (type: double)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27
    + Statistics: Num rows: 1 Data size: 136 Basic stats: COMPLETE Column stats: COMPLETE
    + ListSink

      PREHOOK: query: SELECT ROUND(LN(3.0),12), LN(0.0), LN(-1), ROUND(LOG(3.0),12), LOG(0.0),
             LOG(-1), ROUND(LOG2(3.0),12), LOG2(0.0), LOG2(-1),
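
    Two things change in udf7.q.out: the POW calls over constant decimals
    (power(CAST( 1 AS decimal(10,0)), 0) and power(CAST( 2 AS decimal(10,0)), 3)) are
    folded to the literals 1.0 and 8.0, and because the whole select list is now
    constant, the MapReduce stage disappears; the plan collapses to a single root
    Fetch stage whose processor tree evaluates the Select straight into the ListSink.
    The relevant fragment of the test query, per the hunk header:

    SELECT POW(CAST(2 AS DECIMAL), CAST(3 AS INT)) FROM dest1;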

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/udf_format_number.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/udf_format_number.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/udf_format_number.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/udf_format_number.q.out Wed Apr 1 01:15:50 2015
    @@ -211,3 +211,20 @@ POSTHOOK: type: QUERY
      POSTHOOK: Input: default@src
      #### A masked pattern was here ####
      12,332.1235 12,332.12 12,332.1000 -12,332 12,332.6000
    +PREHOOK: query: -- nulls
    +SELECT
    + format_number(cast(null as int), 0),
    + format_number(12332.123456BD, cast(null as int)),
    + format_number(cast(null as int), cast(null as int))
    +PREHOOK: type: QUERY
    +PREHOOK: Input: _dummy_database@_dummy_table
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- nulls
    +SELECT
    + format_number(cast(null as int), 0),
    + format_number(12332.123456BD, cast(null as int)),
    + format_number(cast(null as int), cast(null as int))
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: _dummy_database@_dummy_table
    +#### A masked pattern was here ####
    +NULL NULL NULL
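
    The added hunk records new expected output rather than a plan change:
    format_number now has explicit coverage for NULL in either argument position, and
    returns NULL in all three cases:

    SELECT format_number(cast(null as int), 0),
           format_number(12332.123456BD, cast(null as int)),
           format_number(cast(null as int), cast(null as int));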

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/udf_reflect2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/udf_reflect2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/udf_reflect2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/udf_reflect2.q.out Wed Apr 1 01:15:50 2015
    @@ -320,17 +320,13 @@ STAGE PLANS:
                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                GatherStats: false
                Select Operator
    - expressions: UDFToInteger(key) (type: int), value (type: string)
    - outputColumnNames: _col0, _col1
    + expressions: UDFToInteger(key) (type: int), reflect2(UDFToInteger(key),'byteValue') (type: tinyint), reflect2(UDFToInteger(key),'shortValue') (type: smallint), reflect2(UDFToInteger(key),'intValue') (type: int), reflect2(UDFToInteger(key),'longValue') (type: bigint), reflect2(UDFToInteger(key),'floatValue') (type: float), reflect2(UDFToInteger(key),'doubleValue') (type: double), reflect2(UDFToInteger(key),'toString') (type: string), value (type: string), reflect2(value,'concat','_concat') (type: string), reflect2(value,'contains','86') (type: boolean), reflect2(value,'startsWith','v') (type: boolean), reflect2(value,'endsWith','6') (type: boolean), reflect2(value,'equals','val_86') (type: boolean), reflect2(value,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(value,'getBytes') (type: binary), reflect2(value,'indexOf','1') (type: int), reflect2(value,'lastIndexOf','1') (type: int), reflect2(value,'replace','val','VALUE') (type: string), reflect2(value,'substring',1) (type: string), reflect2(value,'substring',1,5) (type: string), reflect2(value,'toUpperCase') (type: string), reflect2(value,'trim') (type: string), 2013-02-15 19:41:20.0 (type: timestamp), 113 (type: int), 1 (type: int), 5 (type: int), 19 (type: int), 41 (type: int), 20 (type: int), 1360986080000 (type: bigint)
    + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Select Operator
    - expressions: _col0 (type: int), reflect2(_col0,'byteValue') (type: tinyint), reflect2(_col0,'shortValue') (type: smallint), reflect2(_col0,'intValue') (type: int), reflect2(_col0,'longValue') (type: bigint), reflect2(_col0,'floatValue') (type: float), reflect2(_col0,'doubleValue') (type: double), reflect2(_col0,'toString') (type: string), _col1 (type: string), reflect2(_col1,'concat','_concat') (type: string), reflect2(_col1,'contains','86') (type: boolean), reflect2(_col1,'startsWith','v') (type: boolean), reflect2(_col1,'endsWith','6') (type: boolean), reflect2(_col1,'equals','val_86') (type: boolean), reflect2(_col1,'equalsIgnoreCase','VAL_86') (type: boolean), reflect2(_col1,'getBytes') (type: binary), reflect2(_col1,'indexOf','1') (type: int), reflect2(_col1,'lastIndexOf','1') (type: int), reflect2(_col1,'replace','val','VALUE') (type: string), reflect2(_col1,'substring',1) (type: string), reflect2(_col1,'substring',1,5) (type: string), reflect2(_col1,'toUpperCase') (type: string), reflect2(_col1,'trim') (type: string), 2013-02-15 19:41:20.0 (type: timestamp), 113 (type: int), 1 (type: int), 5 (type: int), 19 (type: int), 41 (type: int), 20 (type: int), 1360986080000 (type: bigint)
    - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30
    - Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    - Limit
    - Number of rows: 5
    - Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
    - ListSink
    + Limit
    + Number of rows: 5
    + Statistics: Num rows: 5 Data size: 50 Basic stats: COMPLETE Column stats: NONE
    + ListSink

      PREHOOK: query: SELECT key,
             reflect2(key, "byteValue"),
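
    Here the two stacked Select Operators are merged: the reflect2 projections that
    previously ran over the intermediate columns _col0/_col1 are now computed directly
    over key and value in a single Select above the TableScan, and the Limit/ListSink
    chain moves up accordingly. A trimmed sketch of the query shape, continuing from
    the PREHOOK text below the hunk (the src table is an assumption; this excerpt does
    not show the FROM clause):

    SELECT key, reflect2(key, 'byteValue'), reflect2(value, 'trim')
    FROM src
    LIMIT 5;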

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/union24.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/union24.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/union24.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/union24.q.out Wed Apr 1 01:15:50 2015
    @@ -262,7 +262,7 @@ STAGE PLANS:
                    name: default.src5
                  name: default.src5
            Truncated Path -> Alias:
    - /src5 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:src5]
    + /src5 [null-subquery2:$hdt$_0-subquery2:$hdt$_0:src5]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/unionDistinct_1.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/unionDistinct_1.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/unionDistinct_1.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/unionDistinct_1.q.out Wed Apr 1 01:15:50 2015
    @@ -9089,7 +9089,7 @@ STAGE PLANS:
                    name: default.src5
                  name: default.src5
            Truncated Path -> Alias:
    - /src5 [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:$hdt$_0:src5]
    + /src5 [$hdt$_0-subquery2:$hdt$_0-subquery2:$hdt$_0:src5]
            Needs Tagging: false
            Reduce Operator Tree:
              Group By Operator

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out Wed Apr 1 01:15:50 2015
    @@ -24,10 +24,10 @@ STAGE PLANS:
                  alias: decimal_test
                  Statistics: Num rows: 12288 Data size: 2128368 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((((cdecimal1 > CAST( 0 AS decimal(20,10))) and (cdecimal1 < CAST( 12345.5678 AS decimal(25,15)))) and (cdecimal2 <> CAST( 0 AS decimal(24,14)))) and (cdecimal2 > CAST( 1000 AS decimal(24,14)))) and cdouble is not null) (type: boolean)
    + predicate: (((((cdecimal1 > 0) and (cdecimal1 < 12345.5678)) and (cdecimal2 <> 0)) and (cdecimal2 > 1000)) and cdouble is not null) (type: boolean)
                    Statistics: Num rows: 228 Data size: 39491 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
    - expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (CAST( 2 AS decimal(10,0)) * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % CAST( 10 AS decimal(10,0))) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)
    + expressions: (cdecimal1 + cdecimal2) (type: decimal(25,14)), (cdecimal1 - (2 * cdecimal2)) (type: decimal(26,14)), ((UDFToDouble(cdecimal1) + 2.34) / UDFToDouble(cdecimal2)) (type: double), (UDFToDouble(cdecimal1) * (UDFToDouble(cdecimal2) / 3.4)) (type: double), (cdecimal1 % 10) (type: decimal(12,10)), UDFToInteger(cdecimal1) (type: int), UDFToShort(cdecimal2) (type: smallint), UDFToByte(cdecimal2) (type: tinyint), UDFToLong(cdecimal1) (type: bigint), UDFToBoolean(cdecimal1) (type: boolean), UDFToDouble(cdecimal2) (type: double), UDFToFloat(cdecimal1) (type: float), UDFToString(cdecimal2) (type: string), CAST( cdecimal1 AS TIMESTAMP) (type: timestamp)
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13
                      Statistics: Num rows: 228 Data size: 39491 Basic stats: COMPLETE Column stats: NONE
                      Limit
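
    This is the same decimal-cast folding as in the tez variant above, applied inside
    compound expressions as well: 2 * cdecimal2 and cdecimal1 % 10 lose their CAST
    wrappers while keeping their decimal(26,14) and decimal(12,10) result types. A
    sketch of the projected expressions as a query against the test's decimal_test
    table:

    EXPLAIN
    SELECT cdecimal1 + cdecimal2,
           cdecimal1 - (2 * cdecimal2),
           cdecimal1 % 10
    FROM decimal_test
    WHERE cdecimal1 > 0 AND cdecimal1 < 12345.5678
      AND cdecimal2 <> 0 AND cdecimal2 > 1000
      AND cdouble IS NOT NULL;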

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round.q.out Wed Apr 1 01:15:50 2015
    @@ -102,17 +102,17 @@ STAGE PLANS:
                  alias: decimal_tbl_txt
                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
    - expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
    - outputColumnNames: _col0, _col1
    + expressions: dec (type: decimal(10,0))
    + outputColumnNames: _col0
                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
                      sort order: +
                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: decimal(10,0)), _col1 (type: decimal(11,0))
    + value expressions: _col0 (type: decimal(10,0))
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: decimal(10,0)), VALUE._col1 (type: decimal(11,0))
    + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
    @@ -238,17 +238,17 @@ STAGE PLANS:
                  alias: decimal_tbl_rc
                  Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
    - expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
    - outputColumnNames: _col0, _col1
    + expressions: dec (type: decimal(10,0))
    + outputColumnNames: _col0
                    Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
                      sort order: +
                      Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: decimal(10,0)), _col1 (type: decimal(11,0))
    + value expressions: _col0 (type: decimal(10,0))
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: decimal(10,0)), VALUE._col1 (type: decimal(11,0))
    + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
    @@ -375,18 +375,18 @@ STAGE PLANS:
                  alias: decimal_tbl_orc
                  Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
    - expressions: dec (type: decimal(10,0)), round(dec, -1) (type: decimal(11,0))
    - outputColumnNames: _col0, _col1
    + expressions: dec (type: decimal(10,0))
    + outputColumnNames: _col0
                    Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: round(_col0, (- 1)) (type: decimal(11,0))
                      sort order: +
                      Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col0 (type: decimal(10,0)), _col1 (type: decimal(11,0))
    + value expressions: _col0 (type: decimal(10,0))
            Execution mode: vectorized
            Reduce Operator Tree:
              Select Operator
    - expressions: VALUE._col0 (type: decimal(10,0)), VALUE._col1 (type: decimal(11,0))
    + expressions: VALUE._col0 (type: decimal(10,0)), KEY.reducesinkkey0 (type: decimal(11,0))
                outputColumnNames: _col0, _col1
                Statistics: Num rows: 1 Data size: 112 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
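
    In all three storage-format variants (text, RCFile, ORC) the pattern is identical:
    round(dec, -1) is no longer projected on the map side and shipped as a value
    column; it is evaluated once as the ReduceSink key and read back on the reduce
    side as KEY.reducesinkkey0, removing one column from the shuffled payload.
    Consistent with a query shaped like the following sketch, where the ORDER BY
    expression is inferred from the key/sort-order lines:

    EXPLAIN
    SELECT dec, round(dec, -1)
    FROM decimal_tbl_orc
    ORDER BY round(dec, -1);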

    Modified: hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out
    URL: http://svn.apache.org/viewvc/hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out?rev=1670534&r1=1670533&r2=1670534&view=diff
    ==============================================================================
    --- hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out (original)
    +++ hive/branches/llap/ql/src/test/results/clientpositive/vector_decimal_round_2.q.out Wed Apr 1 01:15:50 2015
    @@ -431,7 +431,7 @@ STAGE PLANS:
                  alias: decimal_tbl_4_orc
                  Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                  Select Operator
    - expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9)), round(1809242.3151111344, 9) (type: decimal(17,9)), round((- 1809242.3151111344), 9) (type: decimal(17,9))
    + expressions: round(pos, 9) (type: decimal(30,9)), round(neg, 9) (type: decimal(30,9)), 1809242.315111134 (type: decimal(17,9)), -1809242.315111134 (type: decimal(17,9))
                    outputColumnNames: _col0, _col1, _col2, _col3
                    Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
    @@ -439,6 +439,7 @@ STAGE PLANS:
                      sort order: +
                      Statistics: Num rows: 1 Data size: 224 Basic stats: COMPLETE Column stats: NONE
                      value expressions: _col1 (type: decimal(30,9)), _col2 (type: decimal(17,9)), _col3 (type: decimal(17,9))
    + Execution mode: vectorized
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: decimal(30,9)), VALUE._col0 (type: decimal(30,9)), VALUE._col1 (type: decimal(17,9)), VALUE._col2 (type: decimal(17,9))
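
    The last file shows two changes: round over constant arguments now folds at
    compile time (round(1809242.3151111344, 9) becomes the literal 1809242.315111134,
    and likewise for the negated constant), and the map task gains Execution mode:
    vectorized, plausibly because the simplified expression tree is now vectorizable.
    Per the hunk, the select list corresponds to a query like this sketch (the ORDER
    BY is inferred from the sort-order line):

    EXPLAIN
    SELECT round(pos, 9), round(neg, 9),
           round(1809242.3151111344, 9), round(-1809242.3151111344, 9)
    FROM decimal_tbl_4_orc
    ORDER BY round(pos, 9);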
