Repository: hive
Updated Branches:
   refs/heads/hbase-metastore 0fa45e4a5 -> 2fe60861d


HIVE-11496: Better tests for evaluating ORC predicate pushdown (Prasanth Jayachandran reviewed by Sergey Shelukhin)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/7536edec
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/7536edec
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/7536edec

Branch: refs/heads/hbase-metastore
Commit: 7536edec1dc39028fca7d53f1a09aa56f9531682
Parents: 3e68cdc
Author: Prasanth Jayachandran <j.prasanth.j@gmail.com>
Authored: Sun Aug 9 16:58:52 2015 -0700
Committer: Prasanth Jayachandran <j.prasanth.j@gmail.com>
Committed: Sun Aug 9 16:58:52 2015 -0700

----------------------------------------------------------------------
  .../test/resources/testconfiguration.properties | 1 +
  .../ql/hooks/PostExecTezSummaryPrinter.java | 72 ++
  .../hadoop/hive/ql/io/orc/RecordReaderImpl.java | 18 +-
  .../hive/ql/io/sarg/ConvertAstToSearchArg.java | 14 +-
  .../test/queries/clientpositive/orc_ppd_basic.q | 177 +++++
  .../clientpositive/tez/orc_ppd_basic.q.out | 701 +++++++++++++++++++
  6 files changed, 975 insertions(+), 8 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/itests/src/test/resources/testconfiguration.properties
----------------------------------------------------------------------
diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
index c710b0b..bed621d 100644
--- a/itests/src/test/resources/testconfiguration.properties
+++ b/itests/src/test/resources/testconfiguration.properties
@@ -331,6 +331,7 @@ minitez.query.files=bucket_map_join_tez1.q,\
    mapjoin_decimal.q,\
    lvj_mapjoin.q, \
    mrr.q,\
+  orc_ppd_basic.q,\
    tez_bmj_schema_evolution.q,\
    tez_dml.q,\
    tez_fsstat.q,\

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
new file mode 100644
index 0000000..60c587f
--- /dev/null
+++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java
@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p/>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p/>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.hooks;
+
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.ql.QueryPlan;
+import org.apache.hadoop.hive.ql.exec.Utilities;
+import org.apache.hadoop.hive.ql.exec.tez.TezTask;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.tez.common.counters.CounterGroup;
+import org.apache.tez.common.counters.TezCounter;
+import org.apache.tez.common.counters.TezCounters;
+
+/**
+ * Post execution hook to print hive tez counters to console error stream.
+ */
+public class PostExecTezSummaryPrinter implements ExecuteWithHookContext {
+  private static final Log LOG = LogFactory.getLog(PostExecTezSummaryPrinter.class.getName());
+
+  @Override
+  public void run(HookContext hookContext) throws Exception {
+    assert (hookContext.getHookType() == HookContext.HookType.POST_EXEC_HOOK);
+    HiveConf conf = hookContext.getConf();
+    if (!"tez".equals(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE))) {
+      return;
+    }
+
+    LOG.info("Executing post execution hook to print tez summary..");
+    SessionState ss = SessionState.get();
+    SessionState.LogHelper console = ss.getConsole();
+    QueryPlan plan = hookContext.getQueryPlan();
+    if (plan == null) {
+      return;
+    }
+
+    List<TezTask> rootTasks = Utilities.getTezTasks(plan.getRootTasks());
+    for (TezTask tezTask : rootTasks) {
+      LOG.info("Printing summary for tez task: " + tezTask.getName());
+      TezCounters counters = tezTask.getTezCounters();
+      if (counters != null) {
+        for (CounterGroup group : counters) {
+          if ("HIVE".equals(group.getDisplayName())) {
+            console.printError(tezTask.getId() + " HIVE COUNTERS:");
+            for (TezCounter counter : group) {
+              console.printError(" " + counter.getDisplayName() + ": " + counter.getValue());
+            }
+          }
+        }
+      }
+    }
+  }
+
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
index f85420d..0d765b1 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java
@@ -376,7 +376,7 @@ class RecordReaderImpl implements RecordReader {
        Object predObj = getBaseObjectForComparison(predicate.getType(), baseObj);

        result = evaluatePredicateMinMax(predicate, predObj, minValue, maxValue, hasNull);
-      if (bloomFilter != null && result != TruthValue.NO_NULL && result != TruthValue.NO) {
+      if (shouldEvaluateBloomFilter(predicate, result, bloomFilter)) {
          result = evaluatePredicateBloomFilter(predicate, predObj, bloomFilter, hasNull);
        }
        // in case failed conversion, return the default YES_NO_NULL truth value
@@ -394,6 +394,22 @@ class RecordReaderImpl implements RecordReader {
      return result;
    }

+  private static boolean shouldEvaluateBloomFilter(PredicateLeaf predicate,
+      TruthValue result, BloomFilterIO bloomFilter) {
+    // evaluate bloom filter only when
+    // 1) Bloom filter is available
+    // 2) Min/Max evaluation yield YES or MAYBE
+    // 3) Predicate is EQUALS or IN list
+    if (bloomFilter != null
+        && result != TruthValue.NO_NULL && result != TruthValue.NO
+        && (predicate.getOperator().equals(PredicateLeaf.Operator.EQUALS)
+            || predicate.getOperator().equals(PredicateLeaf.Operator.NULL_SAFE_EQUALS)
+            || predicate.getOperator().equals(PredicateLeaf.Operator.IN))) {
+      return true;
+    }
+    return false;
+  }
+
    private static TruthValue evaluatePredicateMinMax(PredicateLeaf predicate, Object predObj,
        Object minValue,
        Object maxValue,

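The shouldEvaluateBloomFilter() check above limits bloom filter evaluation to EQUALS, NULL_SAFE_EQUALS and IN predicates. A bloom filter can only answer membership queries (definitely absent, or maybe present), so it adds nothing for range predicates, which the min/max statistics already handle. A minimal, self-contained sketch of that property, using Guava's BloomFilter as a stand-in for Hive's internal BloomFilterIO (an assumption for illustration only, not code from this patch):

import java.nio.charset.StandardCharsets;

import com.google.common.hash.BloomFilter;
import com.google.common.hash.Funnels;

public class BloomFilterSketch {
  public static void main(String[] args) {
    // Stand-in for the per-row-group bloom filter that ORC builds when
    // orc.bloom.filter.columns is set (see orc_ppd_basic.q further below).
    BloomFilter<CharSequence> filter =
        BloomFilter.create(Funnels.stringFunnel(StandardCharsets.UTF_8), 1000, 0.05);
    filter.put("zach young");
    filter.put("zach zipper");

    // Equality and IN predicates map to membership tests: "false" means the value
    // is definitely not in the row group, so the whole row group can be skipped.
    System.out.println(filter.mightContain("hello world")); // false -> prune row group
    System.out.println(filter.mightContain("zach young"));  // true  -> must read row group

    // A range predicate such as s < "b" cannot be expressed as a membership test,
    // which is why only EQUALS/NULL_SAFE_EQUALS/IN reach the bloom filter.
  }
}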
http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
----------------------------------------------------------------------
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
index eb8c03f..5c4b7ea 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/ConvertAstToSearchArg.java
@@ -18,14 +18,15 @@

  package org.apache.hadoop.hive.ql.io.sarg;

-import com.esotericsoftware.kryo.Kryo;
-import com.esotericsoftware.kryo.io.Input;
+import java.sql.Date;
+import java.sql.Timestamp;
+import java.util.List;
+
  import org.apache.commons.codec.binary.Base64;
  import org.apache.commons.logging.Log;
  import org.apache.commons.logging.LogFactory;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.common.type.HiveChar;
-import org.apache.hadoop.hive.common.type.HiveVarchar;
  import org.apache.hadoop.hive.ql.exec.Utilities;
  import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
  import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
@@ -51,9 +52,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
  import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo;
  import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;

-import java.sql.Date;
-import java.sql.Timestamp;
-import java.util.List;
+import com.esotericsoftware.kryo.Kryo;
+import com.esotericsoftware.kryo.io.Input;

  public class ConvertAstToSearchArg {
    private static final Log LOG = LogFactory.getLog(ConvertAstToSearchArg.class);
@@ -145,7 +145,7 @@ public class ConvertAstToSearchArg {
          return ((Number) lit).longValue();
        case STRING:
          if (lit instanceof HiveChar) {
-          lit = ((HiveChar) lit).getPaddedValue();
+          return ((HiveChar) lit).getPaddedValue();
          } else if (lit instanceof String) {
            return lit;
          } else {

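The one-line HiveChar change above is what the char(n) queries in orc_ppd_basic.q below exercise: a char(n) literal is space-padded to length n, so cast("zach young" as char(50)) yields a 50-character padded string that never equals the unpadded values stored in the string column s and prunes every row group, while char(10) pads to exactly the original 10 characters and still matches. A hedged sketch of that padding behaviour (getPaddedValue() is the method called in the diff; the wrapper class and printed values are illustrative, not taken from the patch):

import org.apache.hadoop.hive.common.type.HiveChar;

public class HiveCharPaddingSketch {
  public static void main(String[] args) {
    String stored = "zach young";                 // value as stored in the string column s

    HiveChar char50 = new HiveChar(stored, 50);   // cast("zach young" as char(50))
    HiveChar char10 = new HiveChar(stored, 10);   // cast("zach young" as char(10))

    // getPaddedValue() is what the fixed code now returns as the search argument literal.
    System.out.println(char50.getPaddedValue().length());       // 50: trailing spaces added
    System.out.println(char50.getPaddedValue().equals(stored)); // false: predicate matches nothing
    System.out.println(char10.getPaddedValue().equals(stored)); // true: predicate still matches
  }
}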
http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/test/queries/clientpositive/orc_ppd_basic.q
----------------------------------------------------------------------
diff --git a/ql/src/test/queries/clientpositive/orc_ppd_basic.q b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
new file mode 100644
index 0000000..f9dafef
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/orc_ppd_basic.q
@@ -0,0 +1,177 @@
+SET hive.fetch.task.conversion=none;
+SET hive.optimize.index.filter=true;
+SET hive.cbo.enable=false;
+
+CREATE TABLE staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE;
+
+LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging;
+LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging;
+
+CREATE TABLE orc_ppd_staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
+
+insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s;
+
+-- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1;
+insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1;
+
+CREATE TABLE orc_ppd(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*");
+
+insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s;
+
+SET hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.PostExecTezSummaryPrinter;
+
+-- Row group statistics for column t:
+-- Entry 0: count: 994 hasNull: true min: -10 max: 54 sum: 26014 positions: 0,0,0,0,0,0,0
+-- Entry 1: count: 1000 hasNull: false min: 54 max: 118 sum: 86812 positions: 0,2,124,0,0,116,11
+-- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
+
+-- INPUT_RECORDS: 2100 (all row groups)
+select count(*) from orc_ppd;
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where t > 127;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = 55;
+select count(*) from orc_ppd where t <=> 50;
+select count(*) from orc_ppd where t <=> 100;
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t = "54";
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = -10.0;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = cast(53 as float);
+select count(*) from orc_ppd where t = cast(53 as double);
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t < 100;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t < 100 and t > 98;
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t <= 100;
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t is null;
+
+-- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where t in (5, 120);
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t between 60 and 80;
+
+-- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where t = -100;
+select count(*) from orc_ppd where t <=> -100;
+select count(*) from orc_ppd where t = 125;
+select count(*) from orc_ppd where t IN (-100, 125, 200);
+
+-- Row group statistics for column s:
+-- Entry 0: count: 1000 hasNull: false min: max: zach young sum: 12907 positions: 0,0,0
+-- Entry 1: count: 1000 hasNull: false min: alice allen max: zach zipper sum: 12704 positions: 0,1611,191
+-- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where s > "zzz";
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = "zach young";
+select count(*) from orc_ppd where s <=> "zach zipper";
+select count(*) from orc_ppd where s <=> "";
+
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s is null;
+
+-- INPUT_RECORDS: 2100
+select count(*) from orc_ppd where s is not null;
+
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = cast("zach young" as char(50));
+
+-- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = cast("zach young" as char(10));
+select count(*) from orc_ppd where s = cast("zach young" as varchar(10));
+select count(*) from orc_ppd where s = cast("zach young" as varchar(50));
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s < "b";
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s > "alice" and s < "bob";
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s in ("alice allen", "");
+
+-- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s between "" and "alice allen";
+
+-- INPUT_RECORDS: 100 (1 row group)
+select count(*) from orc_ppd where s between "zz" and "zzz";
+
+-- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where s between "zach zipper" and "zzz";
+
+-- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = "hello world";
+select count(*) from orc_ppd where s <=> "apache hive";
+select count(*) from orc_ppd where s IN ("a", "z");
+
+-- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "sarah ovid";
+
+-- INPUT_RECORDS: 1100
+select count(*) from orc_ppd where s = "wendy king";
+
+-- INPUT_RECORDS: 1000
+select count(*) from orc_ppd where s = "wendy king" and t < 0;
+
+-- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "wendy king" and t > 100;
\ No newline at end of file

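The INPUT_RECORDS values expected in the comments above follow directly from the row index stride: with orc.row.index.stride=1000 and 2100 rows in orc_ppd, the data splits into three row groups of 1000, 1000 and 100 rows, and the counter printed by PostExecTezSummaryPrinter reports the total size of the row groups that survive pruning. A small helper that reproduces the arithmetic (purely illustrative, not part of the patch):

import java.util.Arrays;

public class RowGroupArithmetic {

  // Sizes of the row groups for a given row count and index stride.
  static int[] rowGroupSizes(int totalRows, int stride) {
    int groups = (totalRows + stride - 1) / stride;           // ceiling division
    int[] sizes = new int[groups];
    for (int g = 0; g < groups; g++) {
      sizes[g] = Math.min(stride, totalRows - g * stride);
    }
    return sizes;
  }

  // INPUT_RECORDS when only the listed row groups are read.
  static int inputRecords(int[] sizes, int... selectedGroups) {
    int sum = 0;
    for (int g : selectedGroups) {
      sum += sizes[g];
    }
    return sum;
  }

  public static void main(String[] args) {
    int[] sizes = rowGroupSizes(2100, 1000);
    System.out.println(Arrays.toString(sizes));    // [1000, 1000, 100]
    System.out.println(inputRecords(sizes));       // 0    -> e.g. t > 127 (no row groups)
    System.out.println(inputRecords(sizes, 1));    // 1000 -> e.g. t = 55 (entry 1 only)
    System.out.println(inputRecords(sizes, 0, 1)); // 2000 -> e.g. t < 100
    System.out.println(inputRecords(sizes, 0, 2)); // 1100 -> e.g. t in (5, 120)
  }
}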
http://git-wip-us.apache.org/repos/asf/hive/blob/7536edec/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out b/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out
new file mode 100644
index 0000000..2d0984b
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/orc_ppd_basic.q.out
@@ -0,0 +1,701 @@
+PREHOOK: query: CREATE TABLE staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@staging
+POSTHOOK: query: CREATE TABLE staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'
+STORED AS TEXTFILE
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' OVERWRITE INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging
+PREHOOK: type: LOAD
+#### A masked pattern was here ####
+PREHOOK: Output: default@staging
+POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/over1k' INTO TABLE staging
+POSTHOOK: type: LOAD
+#### A masked pattern was here ####
+POSTHOOK: Output: default@staging
+PREHOOK: query: CREATE TABLE orc_ppd_staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: CREATE TABLE orc_ppd_staging(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_ppd_staging
+PREHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: insert overwrite table orc_ppd_staging select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from staging order by t, s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b SIMPLE [(staging)staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.bin SIMPLE [(staging)staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE [(staging)staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.d SIMPLE [(staging)staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.dec SIMPLE [(staging)staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.f SIMPLE [(staging)staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.i SIMPLE [(staging)staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.si SIMPLE [(staging)staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.t SIMPLE [(staging)staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.ts SIMPLE [(staging)staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION [(staging)staging.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: -- just to introduce a gap in min/max range for bloom filters. The dataset has contiguous values
+-- which makes it hard to test bloom filters
+insert into orc_ppd_staging select -10,-321,-65680,-4294967430,-97.94,-13.07,true,"aaa","aaa","aaa","1990-03-11","1990-03-11 10:11:58.703308",-71.54,"aaa" from staging limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bin EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.d EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.dec EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.f EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.i EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.si EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.t EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.ts EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION []
+PREHOOK: query: insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@staging
+PREHOOK: Output: default@orc_ppd_staging
+POSTHOOK: query: insert into orc_ppd_staging select 127,331,65690,4294967440,107.94,23.07,true,"zzz","zzz","zzz","2023-03-11","2023-03-11 10:11:58.703308",71.54,"zzz" from staging limit 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@staging
+POSTHOOK: Output: default@orc_ppd_staging
+POSTHOOK: Lineage: orc_ppd_staging.b SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.bin EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.bo SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.c EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.d SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.da EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.dec EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.f EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.i SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.s SIMPLE []
+POSTHOOK: Lineage: orc_ppd_staging.si EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.t EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.ts EXPRESSION []
+POSTHOOK: Lineage: orc_ppd_staging.v EXPRESSION []
+PREHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: CREATE TABLE orc_ppd(t tinyint,
+ si smallint,
+ i int,
+ b bigint,
+ f float,
+ d double,
+ bo boolean,
+ s string,
+ c char(50),
+ v varchar(50),
+ da date,
+ ts timestamp,
+ dec decimal(4,2),
+ bin binary)
+STORED AS ORC tblproperties("orc.row.index.stride" = "1000", "orc.bloom.filter.columns"="*")
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@orc_ppd
+PREHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd_staging
+PREHOOK: Output: default@orc_ppd
+POSTHOOK: query: insert overwrite table orc_ppd select t, si, i, b, f, d, bo, s, cast(s as char(50)), cast(s as varchar(50)), cast(ts as date), ts, dec, bin from orc_ppd_staging order by t, s
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@orc_ppd_staging
+POSTHOOK: Output: default@orc_ppd
+POSTHOOK: Lineage: orc_ppd.b SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:b, type:bigint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.bin SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:bin, type:binary, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.bo SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:bo, type:boolean, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.c EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.d SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:d, type:double, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.da EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.dec SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:dec, type:decimal(4,2), comment:null), ]
+POSTHOOK: Lineage: orc_ppd.f SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:f, type:float, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.i SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:i, type:int, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.s SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.si SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:si, type:smallint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.t SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:t, type:tinyint, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.ts SIMPLE [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:ts, type:timestamp, comment:null), ]
+POSTHOOK: Lineage: orc_ppd.v EXPRESSION [(orc_ppd_staging)orc_ppd_staging.FieldSchema(name:s, type:string, comment:null), ]
+PREHOOK: query: -- Row group statistics for column t:
+-- Entry 0: count: 994 hasNull: true min: -10 max: 54 sum: 26014 positions: 0,0,0,0,0,0,0
+-- Entry 1: count: 1000 hasNull: false min: 54 max: 118 sum: 86812 positions: 0,2,124,0,0,116,11
+-- Entry 2: count: 100 hasNull: false min: 118 max: 127 sum: 12151 positions: 0,4,119,0,0,244,19
+
+-- INPUT_RECORDS: 2100 (all row groups)
+select count(*) from orc_ppd
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2100
+PREHOOK: query: -- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where t > 127
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = 55
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+8
+PREHOOK: query: select count(*) from orc_ppd where t <=> 50
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+22
+PREHOOK: query: select count(*) from orc_ppd where t <=> 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+16
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t = "54"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+18
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = -10.0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t = cast(53 as float)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+32
+PREHOOK: query: select count(*) from orc_ppd where t = cast(53 as double)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+32
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t < 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1697
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t < 100 and t > 98
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+12
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where t <= 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1713
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where t in (5, 120)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+50
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where t between 60 and 80
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+318
+PREHOOK: query: -- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where t = -100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t <=> -100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t = 125
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where t IN (-100, 125, 200)
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- Row group statistics for column s:
+-- Entry 0: count: 1000 hasNull: false min: max: zach young sum: 12907 positions: 0,0,0
+-- Entry 1: count: 1000 hasNull: false min: alice allen max: zach zipper sum: 12704 positions: 0,1611,191
+-- Entry 2: count: 100 hasNull: false min: bob davidson max: zzz sum: 1281 positions: 0,3246,373
+
+-- INPUT_RECORDS: 0 (no row groups)
+select count(*) from orc_ppd where s > "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = "zach young"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s <=> "zach zipper"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: select count(*) from orc_ppd where s <=> ""
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s is null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ RECORDS_OUT_0: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 2100
+select count(*) from orc_ppd where s is not null
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2100
+PREHOOK: query: -- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = cast("zach young" as char(50))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 1000 (1 row group)
+select count(*) from orc_ppd where s = cast("zach young" as char(10))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(10))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: select count(*) from orc_ppd where s = cast("zach young" as varchar(50))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s < "b"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+81
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s > "alice" and s < "bob"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+74
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s in ("alice allen", "")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+12
+PREHOOK: query: -- INPUT_RECORDS: 2000 (2 row groups)
+select count(*) from orc_ppd where s between "" and "alice allen"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 2000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+13
+PREHOOK: query: -- INPUT_RECORDS: 100 (1 row group)
+select count(*) from orc_ppd where s between "zz" and "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+1
+PREHOOK: query: -- INPUT_RECORDS: 1100 (2 row groups)
+select count(*) from orc_ppd where s between "zach zipper" and "zzz"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+7
+PREHOOK: query: -- bloom filter tests
+-- INPUT_RECORDS: 0
+select count(*) from orc_ppd where s = "hello world"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where s <=> "apache hive"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: select count(*) from orc_ppd where s IN ("a", "z")
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 0
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+0
+PREHOOK: query: -- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "sarah ovid"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 1100
+select count(*) from orc_ppd where s = "wendy king"
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+6
+PREHOOK: query: -- INPUT_RECORDS: 1000
+select count(*) from orc_ppd where s = "wendy king" and t < 0
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 1000
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2
+PREHOOK: query: -- INPUT_RECORDS: 100
+select count(*) from orc_ppd where s = "wendy king" and t > 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@orc_ppd
+#### A masked pattern was here ####
+Stage-1 HIVE COUNTERS:
+ CREATED_FILES: 1
+ DESERIALIZE_ERRORS: 0
+ RECORDS_IN_Map_1: 100
+ RECORDS_OUT_0: 1
+ RECORDS_OUT_INTERMEDIATE_Map_1: 1
+2


  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11436: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : dealing with empty char (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0b38612f
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0b38612f
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0b38612f

    Branch: refs/heads/hbase-metastore
    Commit: 0b38612f6aede1b2e87b4a3f466f27ebf3612d1e
    Parents: 5abcc6a
    Author: Pengcheng Xiong <pxiong@hortonworks.com>
    Authored: Mon Aug 10 12:42:17 2015 +0300
    Committer: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Committed: Mon Aug 10 12:42:17 2015 +0300

    ----------------------------------------------------------------------
      .../calcite/translator/ExprNodeConverter.java | 18 ++----------------
      1 file changed, 2 insertions(+), 16 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/0b38612f/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    index b6a79db..00bf009 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ExprNodeConverter.java
    @@ -225,23 +225,9 @@ public class ExprNodeConverter extends RexVisitorImpl<ExprNodeDesc> {
          case DECIMAL:
            return new ExprNodeConstantDesc(TypeInfoFactory.getDecimalTypeInfo(lType.getPrecision(),
                lType.getScale()), HiveDecimal.create((BigDecimal)literal.getValue3()));
     -      case VARCHAR: {
     -        int varcharLength = lType.getPrecision();
     -        // If we cannot use Varchar due to type length restrictions, we use String
     -        if (varcharLength < 1 || varcharLength > HiveVarchar.MAX_VARCHAR_LENGTH) {
     -          return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
     -        }
     -        return new ExprNodeConstantDesc(TypeInfoFactory.getVarcharTypeInfo(varcharLength),
     -            new HiveVarchar((String) literal.getValue3(), varcharLength));
     -      }
     +      case VARCHAR:
            case CHAR: {
     -        int charLength = lType.getPrecision();
     -        // If we cannot use Char due to type length restrictions, we use String
     -        if (charLength < 1 || charLength > HiveChar.MAX_CHAR_LENGTH) {
     -          return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
     -        }
     -        return new ExprNodeConstantDesc(TypeInfoFactory.getCharTypeInfo(charLength),
     -            new HiveChar((String) literal.getValue3(), charLength));
     +        return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, literal.getValue3());
          }
          case INTERVAL_YEAR_MONTH: {
            BigDecimal monthsBd = (BigDecimal) literal.getValue();
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11397: Parse Hive OR clauses as they are written into the AST (Jesus Camacho Rodriguez, reviewed by Hari Sankar Sivarama Subramaniyan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/5abcc6a2
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/5abcc6a2
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/5abcc6a2

    Branch: refs/heads/hbase-metastore
    Commit: 5abcc6a2768793e4b3da95c6c5edb741d6580e65
    Parents: 7536ede
    Author: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Authored: Mon Aug 10 09:44:49 2015 +0300
    Committer: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Committed: Mon Aug 10 09:44:49 2015 +0300

    ----------------------------------------------------------------------
      .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 2 +-
      .../groupby_multi_single_reducer2.q.out | 2 +-
      .../groupby_multi_single_reducer3.q.out | 8 +++----
      .../results/clientpositive/multi_insert.q.out | 8 +++----
      .../clientpositive/multi_insert_gby.q.out | 2 +-
      .../multi_insert_lateral_view.q.out | 4 ++--
      ...i_insert_move_tasks_share_dependencies.q.out | 24 ++++++++++----------
      .../spark/groupby_multi_single_reducer2.q.out | 2 +-
      .../spark/groupby_multi_single_reducer3.q.out | 8 +++----
      .../clientpositive/spark/multi_insert.q.out | 8 +++----
      .../clientpositive/spark/multi_insert_gby.q.out | 2 +-
      .../spark/multi_insert_lateral_view.q.out | 4 ++--
      ...i_insert_move_tasks_share_dependencies.q.out | 24 ++++++++++----------
      13 files changed, 49 insertions(+), 49 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    index 0a780af..fe7c1ca 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    @@ -5370,8 +5370,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {

              GenericUDFOPOr or = new GenericUDFOPOr();
              List<ExprNodeDesc> expressions = new ArrayList<ExprNodeDesc>(2);
    - expressions.add(previous);
              expressions.add(current);
    + expressions.add(previous);
              ExprNodeDesc orExpr =
                  new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, or, expressions);
              previous = orExpr;

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
    index 2377cd5..972ed51 100644
    --- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
    +++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer2.q.out
    @@ -43,7 +43,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((substr(key, 1, 1) >= 5) or (substr(key, 1, 1) < 5)) (type: boolean)
    + predicate: ((substr(key, 1, 1) < 5) or (substr(key, 1, 1) >= 5)) (type: boolean)
                    Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: substr(key, 1, 1) (type: string), key (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
    index ca0d524..616eaa3 100644
    --- a/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
    +++ b/ql/src/test/results/clientpositive/groupby_multi_single_reducer3.q.out
    @@ -59,7 +59,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
    + predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
    @@ -225,7 +225,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
    + predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: value (type: string)
    @@ -391,7 +391,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
    + predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
                    Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)
    @@ -557,7 +557,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
    + predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: value (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/multi_insert.q.out b/ql/src/test/results/clientpositive/multi_insert.q.out
    index 6f321c3..f8fc172 100644
    --- a/ql/src/test/results/clientpositive/multi_insert.q.out
    +++ b/ql/src/test/results/clientpositive/multi_insert.q.out
    @@ -755,7 +755,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -905,7 +905,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -1123,7 +1123,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -1273,7 +1273,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert_gby.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/multi_insert_gby.q.out
    index 3c51f58..7c5e589 100644
    --- a/ql/src/test/results/clientpositive/multi_insert_gby.q.out
    +++ b/ql/src/test/results/clientpositive/multi_insert_gby.q.out
    @@ -47,7 +47,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key > 450) or (key > 500)) (type: boolean)
    + predicate: ((key > 500) or (key > 450)) (type: boolean)
                    Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
    index 121f78c..4723153 100644
    --- a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
    +++ b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out
    @@ -671,7 +671,7 @@ STAGE PLANS:
                              Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE
                              value expressions: _col1 (type: double)
                  Filter Operator
    - predicate: ((key > 200) or (key < 200)) (type: boolean)
    + predicate: ((key < 200) or (key > 200)) (type: boolean)
                    Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    @@ -1342,7 +1342,7 @@ STAGE PLANS:
                                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
                                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
                  Filter Operator
    - predicate: ((key > 200) or (key < 200)) (type: boolean)
    + predicate: ((key < 200) or (key > 200)) (type: boolean)
                    Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    index 8f9dd12..935ae75 100644
    --- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    +++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    @@ -772,7 +772,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -926,7 +926,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -1148,7 +1148,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -1302,7 +1302,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -2835,7 +2835,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -2971,7 +2971,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -3107,7 +3107,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -3243,7 +3243,7 @@ STAGE PLANS:
                  alias: src
                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -3417,7 +3417,7 @@ STAGE PLANS:
                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                            name: default.src_multi2
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -3664,7 +3664,7 @@ STAGE PLANS:
                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                            name: default.src_multi2
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -3921,7 +3921,7 @@ STAGE PLANS:
                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                            name: default.src_multi2
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
    @@ -4256,7 +4256,7 @@ STAGE PLANS:
                            serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                            name: default.src_multi2
                  Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                    Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    index 7903302..5d15040 100644
    --- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer2.q.out
    @@ -48,7 +48,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((substr(key, 1, 1) >= 5) or (substr(key, 1, 1) < 5)) (type: boolean)
    + predicate: ((substr(key, 1, 1) < 5) or (substr(key, 1, 1) >= 5)) (type: boolean)
                          Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: substr(key, 1, 1) (type: string), key (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    index 4ac7009..5192dbb 100644
    --- a/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    +++ b/ql/src/test/results/clientpositive/spark/groupby_multi_single_reducer3.q.out
    @@ -64,7 +64,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
    + predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string)
    @@ -236,7 +236,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
    + predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: value (type: string)
    @@ -408,7 +408,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200)) or ((value) IN ('val_400', 'val_500') and (key) IN (400, 450))) (type: boolean)
    + predicate: (((value) IN ('val_400', 'val_500') and (key) IN (400, 450)) or ((value) IN ('val_100', 'val_200', 'val_300') and (key) IN (100, 150, 200))) (type: boolean)
                          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string)
    @@ -580,7 +580,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null)) or (((key + key) = 400) or (((key - 100) = 500) and value is not null))) (type: boolean)
    + predicate: ((((key + key) = 400) or (((key - 100) = 500) and value is not null)) or ((((key + key) = 200) or ((key - 100) = 100)) or ((key = 300) and value is not null))) (type: boolean)
                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: value (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/multi_insert.q.out b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
    index c77a691..117133a 100644
    --- a/ql/src/test/results/clientpositive/spark/multi_insert.q.out
    +++ b/ql/src/test/results/clientpositive/spark/multi_insert.q.out
    @@ -596,7 +596,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -742,7 +742,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -888,7 +888,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -1034,7 +1034,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    index 6eae46e..9eeabb4 100644
    --- a/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    +++ b/ql/src/test/results/clientpositive/spark/multi_insert_gby.q.out
    @@ -52,7 +52,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key > 450) or (key > 500)) (type: boolean)
    + predicate: ((key > 500) or (key > 450)) (type: boolean)
                          Statistics: Num rows: 332 Data size: 3527 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
    index 6dc5747..6aec979 100644
    --- a/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
    +++ b/ql/src/test/results/clientpositive/spark/multi_insert_lateral_view.q.out
    @@ -597,7 +597,7 @@ STAGE PLANS:
                        alias: src_10
                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key > 200) or (key < 200)) (type: boolean)
    + predicate: ((key < 200) or (key > 200)) (type: boolean)
                          Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string)
    @@ -1267,7 +1267,7 @@ STAGE PLANS:
                        alias: src_10
                        Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key > 200) or (key < 200)) (type: boolean)
    + predicate: ((key < 200) or (key > 200)) (type: boolean)
                          Statistics: Num rows: 6 Data size: 62 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: value (type: string), key (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/5abcc6a2/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    index cddf923..2bcf1bf 100644
    --- a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    +++ b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    @@ -613,7 +613,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -763,7 +763,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -913,7 +913,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -1063,7 +1063,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -2365,7 +2365,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -2492,7 +2492,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -2619,7 +2619,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -2746,7 +2746,7 @@ STAGE PLANS:
                        alias: src
                        Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -2911,7 +2911,7 @@ STAGE PLANS:
                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                                  name: default.src_multi2
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -3149,7 +3149,7 @@ STAGE PLANS:
                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                                  name: default.src_multi2
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -3387,7 +3387,7 @@ STAGE PLANS:
                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                                  name: default.src_multi2
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
    @@ -3625,7 +3625,7 @@ STAGE PLANS:
                                  serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                                  name: default.src_multi2
                        Filter Operator
    - predicate: ((key < 10) or ((key > 10) and (key < 20))) (type: boolean)
    + predicate: (((key > 10) and (key < 20)) or (key < 10)) (type: boolean)
                          Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
  • Sershe at Aug 17, 2015 at 10:00 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    index 5446ba5..f2cb3ec 100644
    --- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    @@ -147,7 +147,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((762 = cbigint) or (((UDFToFloat(csmallint) < cfloat) and ((UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint)))) or ((cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and ((cstring2 <> 'a') and ((79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))))))) (type: boolean)
    + predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and (cstring2 <> 'a') and (79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
    @@ -353,7 +353,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or (((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))))) (type: boolean)
    + predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
                    Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
    @@ -550,7 +550,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((ctimestamp1 = ctimestamp2) or ((762.0 = cfloat) or ((cstring1 = 'ss') or (((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and (ctimestamp2 is not null and (cstring2 > 'a'))))))) (type: boolean)
    + predicate: ((ctimestamp1 = ctimestamp2) or (762.0 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
    @@ -726,7 +726,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((ctimestamp2 <= ctimestamp1) and ((UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1))) or (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0))) (type: boolean)
    + predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0)) (type: boolean)
                    Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
    @@ -910,7 +910,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or (((1 <> cboolean2) and ((UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint)))) or (((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))))) (type: boolean)
    + predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
                    Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - UDFToDouble(cint)) (type: double), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - UDFToDouble(cint)) - -26.28) (type: double), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / UDFToDouble(ctinyint)) (type: double)
    @@ -1161,7 +1161,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cbigint = 359) or ((cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))))) (type: boolean)
    + predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (UDFToDouble(cbigint) % 79.553) (type: double), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
    @@ -1361,7 +1361,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or (((cdouble <= UDFToDouble(cbigint)) and ((cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble))) or ((UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))))) (type: boolean)
    + predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))) (type: boolean)
                    Statistics: Num rows: 10922 Data size: 2348269 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / UDFToDouble(ctinyint)) (type: double), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
    @@ -1620,7 +1620,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((-1.389 >= UDFToDouble(cint)) and ((csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint)))) or (((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint))))) (type: boolean)
    + predicate: (((-1.389 >= UDFToDouble(cint)) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint)))) (type: boolean)
                    Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
    @@ -2031,7 +2031,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and ((UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    + predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
                    Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cdouble (type: double), cfloat (type: float)
    @@ -2289,7 +2289,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and (cboolean2 is not null and (cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))))) or ((UDFToDouble(ctimestamp2) = -5.0) or (((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or ((cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))))))) (type: boolean)
    + predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
    @@ -2624,7 +2624,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and ((cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257))) or (((cint >= -257) and (cstring1 is not null and (cboolean1 >= 1))) or (cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))))) (type: boolean)
    + predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))) (type: boolean)
                    Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorized_case.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorized_case.q.out b/ql/src/test/results/clientpositive/vectorized_case.q.out
    index 9f547d1..73bf12d 100644
    --- a/ql/src/test/results/clientpositive/vectorized_case.q.out
    +++ b/ql/src/test/results/clientpositive/vectorized_case.q.out
    @@ -46,7 +46,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583))) (type: boolean)
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11376 : Removes legacy code wrt skipping files with codec to HiveInputFormat from CombineHiveInputFormat (Rajat Khandelwal, reviewed by Amareshwari)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cfda5700
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cfda5700
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cfda5700

    Branch: refs/heads/hbase-metastore
    Commit: cfda5700a715b71f5c4c6b325d4adaa213ff7618
    Parents: 6df52ed
    Author: Rajat Khandelwal <prongs@apache.org>
    Authored: Mon Aug 10 17:51:09 2015 +0530
    Committer: Amareshwari Sriramadasu <amareshwari@apache.org>
    Committed: Mon Aug 10 17:51:09 2015 +0530

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/conf/HiveConf.java | 2 -
      .../hive/ql/io/CombineHiveInputFormat.java | 39 --------------------
      .../hive/ql/optimizer/GenMapRedUtils.java | 2 -
      .../org/apache/hadoop/hive/ql/plan/MapWork.java | 10 -----
      .../apache/hadoop/hive/ql/plan/PlanUtils.java | 9 +----
      5 files changed, 1 insertion(+), 61 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    index 36bb394..9cc7987 100644
    --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    @@ -1175,8 +1175,6 @@ public class HiveConf extends Configuration {
          HIVEROWOFFSET("hive.exec.rowoffset", false,
              "Whether to provide the row offset virtual column"),

    - HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE("hive.hadoop.supports.splittable.combineinputformat", false, ""),
    -
          // Optimizer
          HIVEOPTINDEXFILTER("hive.optimize.index.filter", false,
              "Whether to enable automatic use of indexes"),

    http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    index e13c4dd..11740d1 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java
    @@ -374,45 +374,6 @@ public class CombineHiveInputFormat<K extends WritableComparable, V extends Writ
            }
            FileSystem inpFs = path.getFileSystem(job);

    - // Since there is no easy way of knowing whether MAPREDUCE-1597 is present in the tree or not,
    - // we use a configuration variable for the same
    - if (this.mrwork != null && !this.mrwork.getHadoopSupportsSplittable()) {
    - // The following code should be removed, once
    - // https://issues.apache.org/jira/browse/MAPREDUCE-1597 is fixed.
    - // Hadoop does not handle non-splittable files correctly for CombineFileInputFormat,
    - // so don't use CombineFileInputFormat for non-splittable files
    -
    - //ie, dont't combine if inputformat is a TextInputFormat and has compression turned on
    -
    - if (inputFormat instanceof TextInputFormat) {
    - Queue<Path> dirs = new LinkedList<Path>();
    - FileStatus fStats = inpFs.getFileStatus(path);
    -
    - // If path is a directory
    - if (fStats.isDir()) {
    - dirs.offer(path);
    - } else if ((new CompressionCodecFactory(job)).getCodec(path) != null) {
    - //if compresssion codec is set, use HiveInputFormat.getSplits (don't combine)
    - splits = super.getSplits(job, numSplits);
    - return splits;
    - }
    -
    - while (dirs.peek() != null) {
    - Path tstPath = dirs.remove();
    - FileStatus[] fStatus = inpFs.listStatus(tstPath, FileUtils.HIDDEN_FILES_PATH_FILTER);
    - for (int idx = 0; idx < fStatus.length; idx++) {
    - if (fStatus[idx].isDir()) {
    - dirs.offer(fStatus[idx].getPath());
    - } else if ((new CompressionCodecFactory(job)).getCodec(
    - fStatus[idx].getPath()) != null) {
    - //if compresssion codec is set, use HiveInputFormat.getSplits (don't combine)
    - splits = super.getSplits(job, numSplits);
    - return splits;
    - }
    - }
    - }
    - }
    - }
            //don't combine if inputformat is a SymlinkTextInputFormat
            if (inputFormat instanceof SymlinkTextInputFormat) {
              splits = super.getSplits(job, numSplits);
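
    The block removed above hinged on Hadoop's CompressionCodecFactory: when getCodec() resolved a codec for an input file, CombineHiveInputFormat fell back to plain HiveInputFormat splits instead of combining. A minimal sketch of that probe, for illustration only (the path is hypothetical and a default Hadoop Configuration is assumed):

        import org.apache.hadoop.conf.Configuration;
        import org.apache.hadoop.fs.Path;
        import org.apache.hadoop.io.compress.CompressionCodec;
        import org.apache.hadoop.io.compress.CompressionCodecFactory;

        public class CodecProbeSketch {
          public static void main(String[] args) {
            Configuration conf = new Configuration();
            // getCodec() resolves a codec from the file suffix (e.g. ".gz" -> GzipCodec).
            CompressionCodec codec =
                new CompressionCodecFactory(conf).getCodec(new Path("/tmp/sample.gz")); // hypothetical path
            // A non-null codec is what the removed code treated as "non-splittable, do not combine".
            System.out.println(codec == null ? "no codec" : codec.getClass().getSimpleName());
          }
        }

    With this patch the probe, and the hive.hadoop.supports.splittable.combineinputformat switch that guarded it, are gone; as the deleted comment notes, the removal assumes Hadoop's CombineFileInputFormat now handles non-splittable files itself (MAPREDUCE-1597).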

    http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    index 693d8c7..4a325fb 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    @@ -933,8 +933,6 @@ public final class GenMapRedUtils {
          work.setPathToAliases(new LinkedHashMap<String, ArrayList<String>>());
          work.setPathToPartitionInfo(new LinkedHashMap<String, PartitionDesc>());
          work.setAliasToWork(new LinkedHashMap<String, Operator<? extends OperatorDesc>>());
    - work.setHadoopSupportsSplittable(
    - conf.getBoolVar(HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
          return mrWork;
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    index 2cb9257..bc9b645 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapWork.java
    @@ -63,8 +63,6 @@ public class MapWork extends BaseWork {

        private static final Log LOG = LogFactory.getLog(MapWork.class);

    - private boolean hadoopSupportsSplittable;
    -
        // use LinkedHashMap to make sure the iteration order is
        // deterministic, to ease testing
        private LinkedHashMap<String, ArrayList<String>> pathToAliases = new LinkedHashMap<String, ArrayList<String>>();
    @@ -421,14 +419,6 @@ public class MapWork extends BaseWork {
          return this.mapperCannotSpanPartns;
        }

    - public boolean getHadoopSupportsSplittable() {
    - return hadoopSupportsSplittable;
    - }
    -
    - public void setHadoopSupportsSplittable(boolean hadoopSupportsSplittable) {
    - this.hadoopSupportsSplittable = hadoopSupportsSplittable;
    - }
    -
        public String getIndexIntermediateFile() {
          return indexIntermediateFile;
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/cfda5700/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    index 76926e7..b50eaab 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java
    @@ -99,14 +99,7 @@ public final class PlanUtils {

        @SuppressWarnings("nls")
        public static MapredWork getMapRedWork() {
    - try {
    - MapredWork work = new MapredWork();
    - work.getMapWork().setHadoopSupportsSplittable(Hive.get().getConf().getBoolVar(
    - HiveConf.ConfVars.HIVE_COMBINE_INPUT_FORMAT_SUPPORTS_SPLITTABLE));
    - return work;
    - } catch (HiveException ex) {
    - throw new RuntimeException(ex);
    - }
    + return new MapredWork();
        }

        public static TableDesc getDefaultTableDesc(CreateTableDesc directoryDesc,
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11387: CBO: Calcite Operator To Hive Operator (Calcite Return Path) : fix reduce_deduplicate optimization (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez, Hari Subramaniyan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/538ae703
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/538ae703
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/538ae703

    Branch: refs/heads/hbase-metastore
    Commit: 538ae7036f2fe21e47e384523d48392d383e95e8
    Parents: bddbd1d
    Author: Hari Subramaniyan <harisankar@apache.org>
    Authored: Mon Aug 10 18:00:28 2015 -0700
    Committer: Hari Subramaniyan <harisankar@apache.org>
    Committed: Mon Aug 10 18:00:28 2015 -0700

    ----------------------------------------------------------------------
      .../correlation/AbstractCorrelationProcCtx.java | 7 +
      .../correlation/CorrelationUtilities.java | 11 +-
      .../correlation/ReduceSinkDeDuplication.java | 6 +-
      ...i_insert_move_tasks_share_dependencies.q.out | 336 +++---------
      ql/src/test/results/clientpositive/ptf.q.out | 27 +-
      ...i_insert_move_tasks_share_dependencies.q.out | 512 +++++++------------
      .../test/results/clientpositive/spark/ptf.q.out | 17 +-
      .../spark/union_remove_6_subq.q.out | 22 +-
      .../clientpositive/spark/vectorized_ptf.q.out | 21 +-
      .../clientpositive/tez/explainuser_1.q.out | 69 ++-
      .../test/results/clientpositive/tez/ptf.q.out | 15 +-
      .../clientpositive/tez/vectorized_ptf.q.out | 19 +-
      .../clientpositive/union_remove_6_subq.q.out | 34 +-
      .../results/clientpositive/vectorized_ptf.q.out | 67 +--
      14 files changed, 327 insertions(+), 836 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
    index 174685b..5b673df 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java
    @@ -20,6 +20,7 @@ package org.apache.hadoop.hive.ql.optimizer.correlation;

      import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER;
      import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESCRIPTOPERATORTRUST;
    +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE;

      import java.util.HashSet;
      import java.util.Set;
    @@ -39,11 +40,13 @@ abstract class AbstractCorrelationProcCtx implements NodeProcessorCtx {
        // only one reducer if this configuration does not prevents
        private final int minReducer;
        private final Set<Operator<?>> removedOps;
    + private final boolean isMapAggr;

        public AbstractCorrelationProcCtx(ParseContext pctx) {
          removedOps = new HashSet<Operator<?>>();
          trustScript = pctx.getConf().getBoolVar(HIVESCRIPTOPERATORTRUST);
          minReducer = pctx.getConf().getIntVar(HIVEOPTREDUCEDEDUPLICATIONMINREDUCER);
    + isMapAggr = pctx.getConf().getBoolVar(HIVEMAPSIDEAGGREGATE);
          this.pctx = pctx;
        }

    @@ -70,4 +73,8 @@ abstract class AbstractCorrelationProcCtx implements NodeProcessorCtx {
        public boolean addRemovedOperator(Operator<?> rsOp) {
          return removedOps.add(rsOp);
        }
    +
    + public boolean isMapAggr() {
    + return isMapAggr;
    + }
      }
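
    The new isMapAggr flag cached above mirrors hive.map.aggr (HIVEMAPSIDEAGGREGATE); the CorrelationUtilities hunks below only treat a parent GroupByOperator as a map-side aggregation to be skipped or collapsed when that flag is set. A minimal sketch of reading the flag, assuming only a default HiveConf (class name is illustrative):

        import org.apache.hadoop.hive.conf.HiveConf;

        public class MapAggrFlagSketch {
          public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // HIVEMAPSIDEAGGREGATE backs hive.map.aggr; when it is false, a GroupByOperator
            // parent of a ReduceSinkOperator is not the product of map-side aggregation,
            // so reduce-sink deduplication must not remove it.
            boolean isMapAggr = conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE);
            System.out.println("hive.map.aggr = " + isMapAggr);
          }
        }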

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
    index 64bef21..7bb49be 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationUtilities.java
    @@ -29,6 +29,7 @@ import java.util.Map;
      import java.util.Map.Entry;
      import java.util.Set;

    +import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.ql.exec.ColumnInfo;
      import org.apache.hadoop.hive.ql.exec.FilterOperator;
      import org.apache.hadoop.hive.ql.exec.ForwardOperator;
    @@ -44,6 +45,7 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator;
      import org.apache.hadoop.hive.ql.exec.Utilities;
      import org.apache.hadoop.hive.ql.exec.Utilities.ReduceField;
      import org.apache.hadoop.hive.ql.metadata.HiveException;
    +import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication.ReduceSinkDeduplicateProcCtx;
      import org.apache.hadoop.hive.ql.parse.ParseContext;
      import org.apache.hadoop.hive.ql.parse.SemanticException;
      import org.apache.hadoop.hive.ql.plan.AggregationDesc;
    @@ -163,10 +165,10 @@ public final class CorrelationUtilities {
          return type.isInstance(parent) ? (T)parent : null;
        }

    - protected static Operator<?> getStartForGroupBy(ReduceSinkOperator cRS)
    + protected static Operator<?> getStartForGroupBy(ReduceSinkOperator cRS, ReduceSinkDeduplicateProcCtx dedupCtx)
            throws SemanticException {
          Operator<? extends Serializable> parent = getSingleParent(cRS);
    - return parent instanceof GroupByOperator ? parent : cRS; // skip map-aggr GBY
    + return parent instanceof GroupByOperator && dedupCtx.isMapAggr() ? parent : cRS; // skip map-aggr GBY
        }


    @@ -240,6 +242,7 @@ public final class CorrelationUtilities {
          || cursor instanceof FilterOperator
          || cursor instanceof ForwardOperator
          || cursor instanceof ScriptOperator
    + || cursor instanceof GroupByOperator
          || cursor instanceof ReduceSinkOperator)) {
              return null;
            }
    @@ -395,7 +398,7 @@ public final class CorrelationUtilities {

          Operator<?> parent = getSingleParent(cRS);

    - if (parent instanceof GroupByOperator) {
    + if ((parent instanceof GroupByOperator) && procCtx.isMapAggr()) {
            // pRS-cGBYm-cRS-cGBYr (map aggregation) --> pRS-cGBYr(COMPLETE)
            // copies desc of cGBYm to cGBYr and remove cGBYm and cRS
            GroupByOperator cGBYm = (GroupByOperator) parent;
    @@ -440,7 +443,7 @@ public final class CorrelationUtilities {
          removeOperator(cRS, cGBYr, parent, context);
          procCtx.addRemovedOperator(cRS);

    - if (parent instanceof GroupByOperator) {
    + if ((parent instanceof GroupByOperator) && procCtx.isMapAggr()) {
            removeOperator(parent, cGBYr, getSingleParent(parent), context);
            procCtx.addRemovedOperator(cGBYr);
          }
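
    The changes above only treat a parent GroupByOperator as a skippable map-side GBY when map-side
    aggregation is actually enabled; with hive.map.aggr=false that parent is the real reduce-side
    aggregation and must not be folded away. A simplified restatement of the guard, assuming hive-exec
    on the classpath and a parent already obtained via a helper equivalent to getSingleParent (this
    sketch is not part of the patch):

        import org.apache.hadoop.hive.ql.exec.GroupByOperator;
        import org.apache.hadoop.hive.ql.exec.Operator;
        import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;

        final class GroupByStartSketch {
          // Only skip over the parent when it is a genuine map-side GBY,
          // i.e. when hive.map.aggr is enabled; otherwise start at the ReduceSink itself.
          static Operator<?> startForGroupBy(ReduceSinkOperator cRS, Operator<?> parent,
              boolean isMapAggr) {
            return (parent instanceof GroupByOperator && isMapAggr) ? parent : cRS;
          }
        }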

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
    index 7b5f9b2..56334ed 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java
    @@ -500,7 +500,7 @@ public class ReduceSinkDeDuplication implements Transform {
          public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
              ReduceSinkDeduplicateProcCtx dedupCtx)
              throws SemanticException {
    - Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
    + Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS, dedupCtx);
            GroupByOperator pGBY =
                CorrelationUtilities.findPossibleParent(
                    start, GroupByOperator.class, dedupCtx.trustScript());
    @@ -547,7 +547,7 @@ public class ReduceSinkDeDuplication implements Transform {
          public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
              ReduceSinkDeduplicateProcCtx dedupCtx)
              throws SemanticException {
    - Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
    + Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS, dedupCtx);
            JoinOperator pJoin =
                CorrelationUtilities.findPossibleParent(
                    start, JoinOperator.class, dedupCtx.trustScript());
    @@ -590,7 +590,7 @@ public class ReduceSinkDeDuplication implements Transform {
          public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY,
              ReduceSinkDeduplicateProcCtx dedupCtx)
              throws SemanticException {
    - Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS);
    + Operator<?> start = CorrelationUtilities.getStartForGroupBy(cRS, dedupCtx);
            ReduceSinkOperator pRS =
                CorrelationUtilities.findPossibleParent(
                    start, ReduceSinkOperator.class, dedupCtx.trustScript());
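
    The three process(...) call sites above just thread dedupCtx through so the cached flag is visible
    where the skip decision is made. The behavioural difference only appears when map-side aggregation is
    disabled, which can be forced per-session; a small illustrative configuration sketch (not part of the
    patch):

        import org.apache.hadoop.hive.conf.HiveConf;

        public class MapAggrOffDemo {
          public static void main(String[] args) {
            HiveConf conf = new HiveConf();
            // With hive.map.aggr disabled, ReduceSinkDeDuplication no longer assumes that a
            // GroupByOperator feeding a ReduceSink is a map-side GBY that can be merged away.
            conf.setBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE, false);
            System.out.println("hive.map.aggr = "
                + conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE));
          }
        }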

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    index 935ae75..81d9808 100644
    --- a/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    +++ b/ql/src/test/results/clientpositive/multi_insert_move_tasks_share_dependencies.q.out
    @@ -2821,11 +2821,10 @@ from src
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-2 is a root stage
    - Stage-3 depends on stages: Stage-2
    - Stage-4 depends on stages: Stage-3, Stage-5
    - Stage-0 depends on stages: Stage-3
    - Stage-5 depends on stages: Stage-2
    - Stage-1 depends on stages: Stage-5
    + Stage-3 depends on stages: Stage-2, Stage-4
    + Stage-0 depends on stages: Stage-2
    + Stage-4 depends on stages: Stage-2
    + Stage-1 depends on stages: Stage-4

      STAGE PLANS:
        Stage: Stage-2
    @@ -2840,7 +2839,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -2855,10 +2854,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -2875,29 +2875,6 @@ STAGE PLANS:
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe

        Stage: Stage-3
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
    - Stage: Stage-4
          Dependency Collection

        Stage: Stage-0
    @@ -2906,7 +2883,7 @@ STAGE PLANS:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-5
    + Stage: Stage-4
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -2957,11 +2934,10 @@ from src
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-2 is a root stage
    - Stage-3 depends on stages: Stage-2
    - Stage-4 depends on stages: Stage-3, Stage-5
    - Stage-0 depends on stages: Stage-3
    - Stage-5 depends on stages: Stage-2
    - Stage-1 depends on stages: Stage-5
    + Stage-3 depends on stages: Stage-2, Stage-4
    + Stage-0 depends on stages: Stage-2
    + Stage-4 depends on stages: Stage-2
    + Stage-1 depends on stages: Stage-4

      STAGE PLANS:
        Stage: Stage-2
    @@ -2976,7 +2952,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -2991,10 +2967,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3011,29 +2988,6 @@ STAGE PLANS:
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe

        Stage: Stage-3
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
    - Stage: Stage-4
          Dependency Collection

        Stage: Stage-0
    @@ -3042,7 +2996,7 @@ STAGE PLANS:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-5
    + Stage: Stage-4
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -3093,11 +3047,10 @@ from src
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-2 is a root stage
    - Stage-3 depends on stages: Stage-2
    - Stage-4 depends on stages: Stage-3, Stage-5
    - Stage-0 depends on stages: Stage-3
    - Stage-5 depends on stages: Stage-2
    - Stage-1 depends on stages: Stage-5
    + Stage-3 depends on stages: Stage-2, Stage-4
    + Stage-0 depends on stages: Stage-2
    + Stage-4 depends on stages: Stage-2
    + Stage-1 depends on stages: Stage-4

      STAGE PLANS:
        Stage: Stage-2
    @@ -3112,7 +3065,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -3127,10 +3080,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3147,29 +3101,6 @@ STAGE PLANS:
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe

        Stage: Stage-3
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
    - Stage: Stage-4
          Dependency Collection

        Stage: Stage-0
    @@ -3178,7 +3109,7 @@ STAGE PLANS:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-5
    + Stage: Stage-4
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -3229,11 +3160,10 @@ from src
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-2 is a root stage
    - Stage-3 depends on stages: Stage-2
    - Stage-4 depends on stages: Stage-3, Stage-5
    - Stage-0 depends on stages: Stage-3
    - Stage-5 depends on stages: Stage-2
    - Stage-1 depends on stages: Stage-5
    + Stage-3 depends on stages: Stage-2, Stage-4
    + Stage-0 depends on stages: Stage-2
    + Stage-4 depends on stages: Stage-2
    + Stage-1 depends on stages: Stage-4

      STAGE PLANS:
        Stage: Stage-2
    @@ -3248,7 +3178,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -3263,10 +3193,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3283,29 +3214,6 @@ STAGE PLANS:
                          serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe

        Stage: Stage-3
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
    - Stage: Stage-4
          Dependency Collection

        Stage: Stage-0
    @@ -3314,7 +3222,7 @@ STAGE PLANS:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-5
    + Stage: Stage-4
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -3369,15 +3277,14 @@ insert overwrite table src_multi2 select * where key > 10 and key < 20
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-4 is a root stage
    - Stage-6 depends on stages: Stage-4, Stage-8, Stage-9
    + Stage-6 depends on stages: Stage-4, Stage-8
        Stage-0 depends on stages: Stage-6
        Stage-5 depends on stages: Stage-0
        Stage-1 depends on stages: Stage-6
        Stage-7 depends on stages: Stage-1
    + Stage-2 depends on stages: Stage-4
        Stage-8 depends on stages: Stage-4
    - Stage-2 depends on stages: Stage-8
    - Stage-9 depends on stages: Stage-4
    - Stage-3 depends on stages: Stage-9
    + Stage-3 depends on stages: Stage-8

      STAGE PLANS:
        Stage: Stage-4
    @@ -3422,7 +3329,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -3437,10 +3344,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3485,36 +3393,13 @@ STAGE PLANS:
        Stage: Stage-7
          Stats-Aggr Operator

    - Stage: Stage-8
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
        Stage: Stage-2
          Move Operator
            files:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-9
    + Stage: Stage-8
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -3616,15 +3501,14 @@ insert overwrite table src_multi2 select * where key > 10 and key < 20
      POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-4 is a root stage
    - Stage-6 depends on stages: Stage-4, Stage-8, Stage-9
    + Stage-6 depends on stages: Stage-4, Stage-8
        Stage-0 depends on stages: Stage-6
        Stage-5 depends on stages: Stage-0
        Stage-1 depends on stages: Stage-6
        Stage-7 depends on stages: Stage-1
    + Stage-2 depends on stages: Stage-4
        Stage-8 depends on stages: Stage-4
    - Stage-2 depends on stages: Stage-8
    - Stage-9 depends on stages: Stage-4
    - Stage-3 depends on stages: Stage-9
    + Stage-3 depends on stages: Stage-8

      STAGE PLANS:
        Stage: Stage-4
    @@ -3669,7 +3553,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -3684,10 +3568,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3732,36 +3617,13 @@ STAGE PLANS:
        Stage: Stage-7
          Stats-Aggr Operator

    - Stage: Stage-8
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
        Stage: Stage-2
          Move Operator
            files:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-9
    + Stage: Stage-8
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -3865,7 +3727,7 @@ STAGE DEPENDENCIES:
        Stage-4 is a root stage
        Stage-11 depends on stages: Stage-4 , consists of Stage-8, Stage-7, Stage-9
        Stage-8
    - Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19
    + Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-4, Stage-18
        Stage-0 depends on stages: Stage-6
        Stage-5 depends on stages: Stage-0
        Stage-1 depends on stages: Stage-6
    @@ -3878,10 +3740,9 @@ STAGE DEPENDENCIES:
        Stage-13
        Stage-15
        Stage-16 depends on stages: Stage-15
    + Stage-2 depends on stages: Stage-4
        Stage-18 depends on stages: Stage-4
    - Stage-2 depends on stages: Stage-18
    - Stage-19 depends on stages: Stage-4
    - Stage-3 depends on stages: Stage-19
    + Stage-3 depends on stages: Stage-18

      STAGE PLANS:
        Stage: Stage-4
    @@ -3926,7 +3787,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -3941,10 +3802,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -4067,36 +3929,13 @@ STAGE PLANS:
                hdfs directory: true
      #### A masked pattern was here ####

    - Stage: Stage-18
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
        Stage: Stage-2
          Move Operator
            files:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-19
    + Stage: Stage-18
          Map Reduce
            Map Operator Tree:
                TableScan
    @@ -4200,7 +4039,7 @@ STAGE DEPENDENCIES:
        Stage-4 is a root stage
        Stage-11 depends on stages: Stage-4 , consists of Stage-8, Stage-7, Stage-9
        Stage-8
    - Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-18, Stage-19
    + Stage-6 depends on stages: Stage-8, Stage-7, Stage-10, Stage-14, Stage-13, Stage-16, Stage-4, Stage-18
        Stage-0 depends on stages: Stage-6
        Stage-5 depends on stages: Stage-0
        Stage-1 depends on stages: Stage-6
    @@ -4213,10 +4052,9 @@ STAGE DEPENDENCIES:
        Stage-13
        Stage-15
        Stage-16 depends on stages: Stage-15
    + Stage-2 depends on stages: Stage-4
        Stage-18 depends on stages: Stage-4
    - Stage-2 depends on stages: Stage-18
    - Stage-19 depends on stages: Stage-4
    - Stage-3 depends on stages: Stage-19
    + Stage-3 depends on stages: Stage-18

      STAGE PLANS:
        Stage: Stage-4
    @@ -4261,7 +4099,7 @@ STAGE PLANS:
                    Reduce Output Operator
                      key expressions: key (type: string), value (type: string)
                      sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Forward
    @@ -4276,10 +4114,11 @@ STAGE PLANS:
                    Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                    File Output Operator
                      compressed: false
    + Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
                      table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                Filter Operator
                  predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                  Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -4402,36 +4241,13 @@ STAGE PLANS:
                hdfs directory: true
      #### A masked pattern was here ####

    - Stage: Stage-18
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    -
        Stage: Stage-2
          Move Operator
            files:
                hdfs directory: false
      #### A masked pattern was here ####

    - Stage: Stage-19
    + Stage: Stage-18
          Map Reduce
            Map Operator Tree:
                TableScan

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/ptf.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/ptf.q.out b/ql/src/test/results/clientpositive/ptf.q.out
    index e61703c..9d34e4e 100644
    --- a/ql/src/test/results/clientpositive/ptf.q.out
    +++ b/ql/src/test/results/clientpositive/ptf.q.out
    @@ -880,8 +880,7 @@ POSTHOOK: type: QUERY
      STAGE DEPENDENCIES:
        Stage-1 is a root stage
        Stage-2 depends on stages: Stage-1
    - Stage-3 depends on stages: Stage-2
    - Stage-0 depends on stages: Stage-3
    + Stage-0 depends on stages: Stage-2

      STAGE PLANS:
        Stage: Stage-1
    @@ -938,7 +937,7 @@ STAGE PLANS:
                  Reduce Output Operator
                    key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
                    sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
    + Map-reduce partition columns: _col0 (type: string)
                    Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
            Reduce Operator Tree:
              Group By Operator
    @@ -946,28 +945,6 @@ STAGE PLANS:
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2
                Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - table:
    - input format: org.apache.hadoop.mapred.SequenceFileInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
    -
    - Stage: Stage-3
    - Map Reduce
    - Map Operator Tree:
    - TableScan
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col2 (type: int)
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                PTF Operator
                  Function definitions:
                      Input definition

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    index 2bcf1bf..9bc6345 100644
    --- a/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    +++ b/ql/src/test/results/clientpositive/spark/multi_insert_move_tasks_share_dependencies.q.out
    @@ -2353,10 +2353,8 @@ STAGE PLANS:
        Stage: Stage-2
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -2370,35 +2368,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -2410,16 +2382,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -2434,6 +2403,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-3
          Dependency Collection
    @@ -2480,10 +2462,8 @@ STAGE PLANS:
        Stage: Stage-2
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -2497,35 +2477,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -2537,16 +2491,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -2561,6 +2512,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-3
          Dependency Collection
    @@ -2607,10 +2571,8 @@ STAGE PLANS:
        Stage: Stage-2
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -2624,35 +2586,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -2664,16 +2600,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -2688,6 +2621,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-3
          Dependency Collection
    @@ -2734,10 +2680,8 @@ STAGE PLANS:
        Stage: Stage-2
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -2751,35 +2695,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -2791,16 +2709,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -2815,6 +2730,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-3
          Dependency Collection
    @@ -2869,10 +2797,8 @@ STAGE PLANS:
        Stage: Stage-4
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -2916,35 +2842,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -2956,16 +2856,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -2980,6 +2877,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-5
          Dependency Collection
    @@ -3107,10 +3017,8 @@ STAGE PLANS:
        Stage: Stage-4
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -3154,35 +3062,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -3194,16 +3076,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3218,6 +3097,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-5
          Dependency Collection
    @@ -3345,10 +3237,8 @@ STAGE PLANS:
        Stage: Stage-4
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -3392,35 +3282,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -3432,16 +3296,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3456,6 +3317,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-5
          Dependency Collection
    @@ -3583,10 +3457,8 @@ STAGE PLANS:
        Stage: Stage-4
          Spark
            Edges:
    - Reducer 5 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 6 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 5 (PARTITION-LEVEL SORT, 2)
    - Reducer 4 <- Reducer 6 (PARTITION-LEVEL SORT, 2)
    + Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -3630,35 +3502,9 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: key (type: string), value (type: string)
                            sort order: ++
    - Map-reduce partition columns: key (type: string), value (type: string)
    + Map-reduce partition columns: key (type: string)
                            Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    - Reducer 3
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), VALUE._col0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - File Output Operator
    - compressed: false
    - Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    - table:
    - input format: org.apache.hadoop.mapred.TextInputFormat
    - output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 5
    + Reducer 2
                  Reduce Operator Tree:
                    Forward
                      Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    @@ -3670,16 +3516,13 @@ STAGE PLANS:
                          mode: complete
                          outputColumnNames: _col0, _col1
                          Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string)
    - sort order: +
    - Map-reduce partition columns: _col0 (type: string)
    + File Output Operator
    + compressed: false
                            Statistics: Num rows: 36 Data size: 382 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col1 (type: string)
    - Reducer 6
    - Reduce Operator Tree:
    - Forward
    - Statistics: Num rows: 221 Data size: 2347 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
                      Filter Operator
                        predicate: ((KEY._col0 > 10) and (KEY._col0 < 20)) (type: boolean)
                        Statistics: Num rows: 24 Data size: 254 Basic stats: COMPLETE Column stats: NONE
    @@ -3694,6 +3537,19 @@ STAGE PLANS:
                            Map-reduce partition columns: _col1 (type: string)
                            Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
                            value expressions: _col0 (type: string)
    + Reducer 3
    + Reduce Operator Tree:
    + Select Operator
    + expressions: VALUE._col0 (type: string), KEY.reducesinkkey0 (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 12 Data size: 127 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe

        Stage: Stage-5
          Dependency Collection

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/ptf.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/ptf.q.out b/ql/src/test/results/clientpositive/spark/ptf.q.out
    index 647b83e..6beeaf4 100644
    --- a/ql/src/test/results/clientpositive/spark/ptf.q.out
    +++ b/ql/src/test/results/clientpositive/spark/ptf.q.out
    @@ -868,8 +868,7 @@ STAGE PLANS:
          Spark
            Edges:
              Reducer 2 <- Map 1 (PARTITION-LEVEL SORT, 2)
    - Reducer 3 <- Reducer 2 (GROUP, 2)
    - Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2)
    + Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -915,7 +914,7 @@ STAGE PLANS:
                            Reduce Output Operator
                              key expressions: _col0 (type: string), _col1 (type: string), _col2 (type: int)
                              sort order: +++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: string), _col2 (type: int)
    + Map-reduce partition columns: _col0 (type: string)
                              Statistics: Num rows: 26 Data size: 3147 Basic stats: COMPLETE Column stats: NONE
              Reducer 3
                  Reduce Operator Tree:
    @@ -924,18 +923,6 @@ STAGE PLANS:
                      mode: mergepartial
                      outputColumnNames: _col0, _col1, _col2
                      Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: string)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
    - value expressions: _col2 (type: int)
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string), VALUE._col0 (type: int)
    - outputColumnNames: _col0, _col1, _col2
    - Statistics: Num rows: 13 Data size: 1573 Basic stats: COMPLETE Column stats: NONE
                      PTF Operator
                        Function definitions:
                            Input definition

    http://git-wip-us.apache.org/repos/asf/hive/blob/538ae703/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
    index fe95bf2..372971c 100644
    --- a/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
    +++ b/ql/src/test/results/clientpositive/spark/union_remove_6_subq.q.out
    @@ -383,9 +383,8 @@ STAGE PLANS:
          Spark
            Edges:
              Reducer 2 <- Map 1 (GROUP, 2)
    - Reducer 6 <- Map 1 (GROUP, 2)
    - Reducer 3 <- Reducer 2 (GROUP, 2), Reducer 6 (GROUP, 2)
    - Reducer 4 <- Reducer 3 (PARTITION-LEVEL SORT, 2)
    + Reducer 5 <- Map 1 (GROUP, 2)
    + Reducer 3 <- Reducer 2 (GROUP PARTITION-LEVEL SORT, 2), Reducer 5 (GROUP PARTITION-LEVEL SORT, 2)
      #### A masked pattern was here ####
            Vertices:
              Map 1
    @@ -429,7 +428,7 @@ STAGE PLANS:
                          Reduce Output Operator
                            key expressions: _col0 (type: string), _col1 (type: bigint)
                            sort order: ++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
    + Map-reduce partition columns: _col0 (type: string)
                            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
              Reducer 3
                  Reduce Operator Tree:
    @@ -438,17 +437,6 @@ STAGE PLANS:
                      mode: mergepartial
                      outputColumnNames: _col0, _col1
                      Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Reduce Output Operator
    - key expressions: _col0 (type: string), _col1 (type: bigint)
    - sort order: ++
    - Map-reduce partition columns: _col0 (type: string)
    - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    - Reducer 4
    - Reduce Operator Tree:
    - Select Operator
    - expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: bigint)
    - outputColumnNames: _col0, _col1
    - Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
                      PTF Operator
                        Function definitions:
                            Input definition
    @@ -480,7 +468,7 @@ STAGE PLANS:
                                input format: org.apache.hadoop.mapred.TextInputFormat
                                output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
                                serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    - Reducer 6
    + Reducer 5
                  Reduce Operator Tree:
                    Group By Operator
                      aggregations: count(VALUE._col0)
    @@ -496,7 +484,7 @@ STAGE PLANS:
                        Reduce Output Operator
                          key expressions: _col0 (type: string), _col1 (type: bigint)
                          sort order: ++
    - Map-reduce partition columns: _col0 (type: string), _col1 (type: bigint)
    + Map-reduce partition columns: _col0 (type: string)
                          Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE

        Stage: Stage-0
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11541: ORC: Split Strategy should depend on global file count, not per-partition (Gopal V reviewed by Prasanth Jayachandran)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/f26b2569
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/f26b2569
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/f26b2569

    Branch: refs/heads/hbase-metastore
    Commit: f26b2569198fbeceaf17a5a77c59eccf5175935c
    Parents: db46e6e
    Author: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Authored: Thu Aug 13 12:35:29 2015 -0700
    Committer: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Committed: Thu Aug 13 12:35:29 2015 -0700

    ----------------------------------------------------------------------
      ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java | 4 ++--
      1 file changed, 2 insertions(+), 2 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/f26b2569/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    index 4e6dd7a..fe2eccd 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java
    @@ -483,7 +483,6 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
          }

          private FileInfo verifyCachedFileInfo(FileStatus file) {
    - context.numFilesCounter.incrementAndGet();
            FileInfo fileInfo = Context.footerCache.getIfPresent(file.getPath());
            if (fileInfo != null) {
              if (isDebugEnabled) {
    @@ -671,6 +670,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,

              int numFiles = children.size();
              long avgFileSize = totalFileSize / numFiles;
    + int totalFiles = context.numFilesCounter.addAndGet(numFiles);
              switch(context.splitStrategyKind) {
                case BI:
                  // BI strategy requested through config
    @@ -684,7 +684,7 @@ public class OrcInputFormat implements InputFormat<NullWritable, OrcStruct>,
                  break;
                default:
                  // HYBRID strategy
    - if (avgFileSize > context.maxSize || numFiles <= context.minSplits) {
    + if (avgFileSize > context.maxSize || totalFiles <= context.minSplits) {
                    splitStrategy = new ETLSplitStrategy(context, fs, dir, children, isOriginal, deltas,
                        covered);
                  } else {
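
    A minimal, self-contained sketch of the patched HYBRID decision. The names here (SplitContext, the thresholds, the Strategy enum) are hypothetical stand-ins for OrcInputFormat's internal Context and split-strategy classes; the point it illustrates is the change above: the counter is bumped once per partition directory, and the running global total, not the per-directory file count, is compared against the minimum-splits threshold.

      import java.util.concurrent.atomic.AtomicInteger;

      // Illustrative sketch only: SplitContext, its thresholds and the Strategy enum are
      // hypothetical stand-ins for OrcInputFormat's internal Context and split strategies.
      class SplitContext {
        final AtomicInteger numFilesCounter = new AtomicInteger(0); // shared across all directories
        final long maxSize = 256L * 1024 * 1024;                    // avg-file-size cutoff
        final int minSplits = 10;                                   // "small job" cutoff, in files
      }

      public class HybridSplitStrategyChooser {
        enum Strategy { ETL, BI }

        // Mirrors the patched logic for one partition directory: add this directory's file
        // count to the global counter and compare the *total* against minSplits.
        static Strategy choose(SplitContext ctx, int numFiles, long totalFileSize) {
          long avgFileSize = totalFileSize / numFiles;
          int totalFiles = ctx.numFilesCounter.addAndGet(numFiles);
          if (avgFileSize > ctx.maxSize || totalFiles <= ctx.minSplits) {
            return Strategy.ETL; // read footers, split by stripe
          }
          return Strategy.BI;    // one split per file, skip footer reads
        }

        public static void main(String[] args) {
          SplitContext ctx = new SplitContext();
          System.out.println(choose(ctx, 4, 4L << 20));   // 4 files seen so far  -> ETL
          System.out.println(choose(ctx, 50, 50L << 20)); // 54 files seen so far -> BI
        }
      }

    The practical effect suggested by the commit title: before the change, a table spread over many small partition directories could pick the footer-reading ETL strategy for every directory because each directory's own file count stayed at or below minSplits, whereas with a global counter only the first few directories are treated as a "small" job.
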
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11480: CBO: Calcite Operator To Hive Operator (Calcite Return Path): char/varchar as input to GenericUDAF (Pengcheng Xiong, reviewed by Jesus Camacho Rodriguez)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0140df74
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0140df74
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0140df74

    Branch: refs/heads/hbase-metastore
    Commit: 0140df748f6714cc327132f008a13f6af5e41397
    Parents: c4ceefb
    Author: Pengcheng Xiong <pxiong@apache.org>
    Authored: Wed Aug 12 10:43:35 2015 -0700
    Committer: Pengcheng Xiong <pxiong@apache.org>
    Committed: Wed Aug 12 10:43:35 2015 -0700

    ----------------------------------------------------------------------
      .../java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java | 2 ++
      .../org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java | 2 ++
      2 files changed, 4 insertions(+)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/0140df74/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
    index 159a2fe..071884c 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java
    @@ -55,6 +55,8 @@ public class GenericUDAFStd extends GenericUDAFVariance {
          case FLOAT:
          case DOUBLE:
          case STRING:
    + case VARCHAR:
    + case CHAR:
          case TIMESTAMP:
          case DECIMAL:
            return new GenericUDAFStdEvaluator();

    http://git-wip-us.apache.org/repos/asf/hive/blob/0140df74/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
    index 3545390..2950605 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java
    @@ -72,6 +72,8 @@ public class GenericUDAFVariance extends AbstractGenericUDAFResolver {
          case FLOAT:
          case DOUBLE:
          case STRING:
    + case VARCHAR:
    + case CHAR:
          case TIMESTAMP:
          case DECIMAL:
            return new GenericUDAFVarianceEvaluator();
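
    Both resolvers pick an evaluator by switching on the primitive category of the argument; before this patch VARCHAR and CHAR were missing from the switch, so a char/varchar column reaching variance/stddev (as the Calcite return path can produce) fell through to the default, unsupported-type branch. A self-contained sketch of that dispatch pattern follows; the Category enum and class name are simplified stand-ins, not Hive's PrimitiveCategory or resolver API.

      // Simplified stand-ins for Hive's PrimitiveCategory and the resolver's getEvaluator()
      // switch; only the shape of the dispatch matches GenericUDAFStd/GenericUDAFVariance.
      public class NumericAggArgumentCheck {
        enum Category { BYTE, SHORT, INT, LONG, FLOAT, DOUBLE, STRING, VARCHAR, CHAR,
                        TIMESTAMP, DECIMAL, BOOLEAN, DATE }

        // True when the argument category is accepted by the variance/std evaluators.
        static boolean supported(Category c) {
          switch (c) {
            case BYTE:
            case SHORT:
            case INT:
            case LONG:
            case FLOAT:
            case DOUBLE:
            case STRING:
            case VARCHAR:   // added by the patch, handled alongside STRING
            case CHAR:      // added by the patch
            case TIMESTAMP:
            case DECIMAL:
              return true;
            default:        // BOOLEAN, DATE, complex types, ... remain unsupported
              return false;
          }
        }

        public static void main(String[] args) {
          System.out.println(supported(Category.VARCHAR)); // true after the patch
          System.out.println(supported(Category.BOOLEAN)); // still false
        }
      }
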
  • Sershe at Aug 17, 2015 at 10:00 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
    index 6fed2dc..b4c7f23 100644
    --- a/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
    +++ b/ql/src/test/results/clientpositive/spark/ppd_join2.q.out
    @@ -78,7 +78,7 @@ STAGE PLANS:
                            outputColumnNames: _col0, _col1
                            Statistics: Num rows: 42 Data size: 446 Basic stats: COMPLETE Column stats: NONE
                            Filter Operator
    - predicate: ((_col0 <> '311') and (((_col1 <> 'val_50') or (_col0 > '1')) and (_col0 < '400'))) (type: boolean)
    + predicate: ((_col0 <> '311') and ((_col1 <> 'val_50') or (_col0 > '1')) and (_col0 < '400')) (type: boolean)
                              Statistics: Num rows: 14 Data size: 148 Basic stats: COMPLETE Column stats: NONE
                              Filter Operator
                                predicate: ((_col0 <> '305') and (_col0 <> '14')) (type: boolean)
    @@ -108,7 +108,7 @@ STAGE PLANS:
                              predicate: (_col0 <> '14') (type: boolean)
                              Statistics: Num rows: 83 Data size: 881 Basic stats: COMPLETE Column stats: NONE
                              Filter Operator
    - predicate: ((_col0 <> '302') and ((_col0 <> '311') and (_col0 < '400'))) (type: boolean)
    + predicate: ((_col0 <> '302') and (_col0 <> '311') and (_col0 < '400')) (type: boolean)
                                Statistics: Num rows: 27 Data size: 286 Basic stats: COMPLETE Column stats: NONE
                                Filter Operator
                                  predicate: _col0 is not null (type: boolean)
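
    Every hunk in these .q.out files is the same kind of change: filter predicates that used to print as nested two-argument AND/OR trees now print as a single flattened n-ary conjunction or disjunction, so (a and (b and c)) becomes (a and b and c) without changing which rows pass. A toy sketch of that flattening over a small expression tree; the node classes here are hypothetical stand-ins for Hive's expression descriptors, not its real API.

      import java.util.ArrayList;
      import java.util.Arrays;
      import java.util.List;

      // Toy expression nodes used only to illustrate the flattening of nested AND/OR;
      // Hive performs the equivalent rewrite on its own expression descriptors.
      public class FlattenBooleanOps {
        static abstract class Expr {}
        static class Leaf extends Expr {
          final String text;
          Leaf(String text) { this.text = text; }
        }
        static class Op extends Expr {
          final String name;            // "and" or "or"
          final List<Expr> children;
          Op(String name, List<Expr> children) { this.name = name; this.children = children; }
        }

        // Collapse nested occurrences of the same operator into one n-ary node.
        static Expr flatten(Expr e) {
          if (!(e instanceof Op)) {
            return e;
          }
          Op op = (Op) e;
          List<Expr> flat = new ArrayList<>();
          for (Expr child : op.children) {
            Expr f = flatten(child);
            if (f instanceof Op && ((Op) f).name.equals(op.name)) {
              flat.addAll(((Op) f).children);   // pull grandchildren up one level
            } else {
              flat.add(f);
            }
          }
          return new Op(op.name, flat);
        }

        static String print(Expr e) {
          if (e instanceof Leaf) {
            return ((Leaf) e).text;
          }
          Op op = (Op) e;
          List<String> parts = new ArrayList<>();
          for (Expr c : op.children) {
            parts.add(print(c));
          }
          return "(" + String.join(" " + op.name + " ", parts) + ")";
        }

        public static void main(String[] args) {
          Expr nested = new Op("and", Arrays.asList(
              new Leaf("(_col0 <> '311')"),
              new Op("and", Arrays.asList(
                  new Op("or", Arrays.asList(
                      new Leaf("(_col1 <> 'val_50')"), new Leaf("(_col0 > '1')"))),
                  new Leaf("(_col0 < '400')")))));
          System.out.println(print(nested));          // old plans: nested binary form
          System.out.println(print(flatten(nested))); // new plans: flattened n-ary form
        }
      }

    The same flattening accounts for the remaining hunks below (ppd_join3, ppd_outer_join4, the vectorization tests, vector_mr_diff_schema_alias): the row counts and statistics in each Filter Operator are unchanged, only the printed shape of the predicate differs.
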

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
    index 661d9d1..c9f6762 100644
    --- a/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
    +++ b/ql/src/test/results/clientpositive/spark/ppd_join3.q.out
    @@ -54,7 +54,7 @@ STAGE PLANS:
                              predicate: (_col0 <> '1') (type: boolean)
                              Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                              Filter Operator
    - predicate: ((_col0 <> '11') and ((_col0 > '0') and ((_col0 < '400') and ((_col0 <> '12') and (_col0 <> '4'))))) (type: boolean)
    + predicate: ((_col0 <> '11') and (_col0 > '0') and (_col0 < '400') and (_col0 <> '12') and (_col0 <> '4')) (type: boolean)
                                Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                                Filter Operator
                                  predicate: _col0 is not null (type: boolean)
    @@ -77,7 +77,7 @@ STAGE PLANS:
                            outputColumnNames: _col0, _col1
                            Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                            Filter Operator
    - predicate: ((_col0 > '0') and (((_col1 <> 'val_500') or (_col0 > '1')) and (_col0 < '400'))) (type: boolean)
    + predicate: ((_col0 > '0') and ((_col1 <> 'val_500') or (_col0 > '1')) and (_col0 < '400')) (type: boolean)
                              Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                              Select Operator
                                expressions: _col0 (type: string)
    @@ -110,7 +110,7 @@ STAGE PLANS:
                              predicate: (_col0 <> '4') (type: boolean)
                              Statistics: Num rows: 28 Data size: 297 Basic stats: COMPLETE Column stats: NONE
                              Filter Operator
    - predicate: ((_col0 <> '11') and ((_col0 > '0') and (_col0 < '400'))) (type: boolean)
    + predicate: ((_col0 <> '11') and (_col0 > '0') and (_col0 < '400')) (type: boolean)
                                Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                                Filter Operator
                                  predicate: _col0 is not null (type: boolean)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
    index db5914c..16acc67 100644
    --- a/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
    +++ b/ql/src/test/results/clientpositive/spark/ppd_outer_join4.q.out
    @@ -99,7 +99,7 @@ STAGE PLANS:
                            outputColumnNames: _col0
                            Statistics: Num rows: 3 Data size: 31 Basic stats: COMPLETE Column stats: NONE
                            Filter Operator
    - predicate: ((_col0 > '10') and ((_col0 < '20') and ((_col0 > '15') and (_col0 < '25')))) (type: boolean)
    + predicate: ((_col0 > '10') and (_col0 < '20') and (_col0 > '15') and (_col0 < '25')) (type: boolean)
                              Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: NONE
                              Filter Operator
                                predicate: _col0 is not null (type: boolean)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    index d1dc486..90bcc1b 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    @@ -994,7 +994,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 like '%b%') or ((79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble))) (type: boolean)
    + predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
    index aa10d96..22be1d7 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_13.q.out
    @@ -86,7 +86,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > 11.0) and ((UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
    + predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                          Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
    @@ -339,7 +339,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > -1.388) and ((UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
    + predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                          Statistics: Num rows: 2730 Data size: 83809 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
    index 4bd24c3..8013bfe 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_15.q.out
    @@ -82,7 +82,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 like '%ss%') or ((cstring1 like '10%') or ((cint >= -75) and ((UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))))) (type: boolean)
    + predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
    index ea5b0da..1a6e971 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_17.q.out
    @@ -67,7 +67,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or ((UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)))) (type: boolean)
    + predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                          Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    index ae233f4..6bd1bb2 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    @@ -152,7 +152,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((762 = cbigint) or (((UDFToFloat(csmallint) < cfloat) and ((UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint)))) or ((cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and ((cstring2 <> 'a') and ((79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))))))) (type: boolean)
    + predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and (cstring2 <> 'a') and (79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
    @@ -364,7 +364,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or (((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))))) (type: boolean)
    + predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
                          Statistics: Num rows: 6826 Data size: 209555 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
    @@ -567,7 +567,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((ctimestamp1 = ctimestamp2) or ((762.0 = cfloat) or ((cstring1 = 'ss') or (((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and (ctimestamp2 is not null and (cstring2 > 'a'))))))) (type: boolean)
    + predicate: ((ctimestamp1 = ctimestamp2) or (762.0 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
    @@ -749,7 +749,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((ctimestamp2 <= ctimestamp1) and ((UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1))) or (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0))) (type: boolean)
    + predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0)) (type: boolean)
                          Statistics: Num rows: 8874 Data size: 272428 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
    @@ -939,7 +939,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or (((1 <> cboolean2) and ((UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint)))) or (((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))))) (type: boolean)
    + predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
                          Statistics: Num rows: 9898 Data size: 303864 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - UDFToDouble(cint)) (type: double), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - UDFToDouble(cint)) - -26.28) (type: double), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / UDFToDouble(ctinyint)) (type: double)
    @@ -1197,7 +1197,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cbigint = 359) or ((cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))))) (type: boolean)
    + predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (UDFToDouble(cbigint) % 79.553) (type: double), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
    @@ -1404,7 +1404,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or (((cdouble <= UDFToDouble(cbigint)) and ((cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble))) or ((UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))))) (type: boolean)
    + predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))) (type: boolean)
                          Statistics: Num rows: 10922 Data size: 335301 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / UDFToDouble(ctinyint)) (type: double), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
    @@ -1670,7 +1670,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((-1.389 >= UDFToDouble(cint)) and ((csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint)))) or (((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint))))) (type: boolean)
    + predicate: (((-1.389 >= UDFToDouble(cint)) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint)))) (type: boolean)
                          Statistics: Num rows: 3868 Data size: 118746 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
    @@ -2085,7 +2085,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and ((UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    + predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
                          Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cdouble (type: double), cfloat (type: float)
    @@ -2340,7 +2340,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and (cboolean2 is not null and (cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))))) or ((UDFToDouble(ctimestamp2) = -5.0) or (((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or ((cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))))))) (type: boolean)
    + predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
    @@ -2672,7 +2672,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and ((cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257))) or (((cint >= -257) and (cstring1 is not null and (cboolean1 >= 1))) or (cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))))) (type: boolean)
    + predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))) (type: boolean)
                          Statistics: Num rows: 4778 Data size: 146682 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    index 7f824f1..c2250e6 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorized_case.q.out
    @@ -45,7 +45,7 @@ STAGE PLANS:
              TableScan
                alias: alltypesorc
                Filter Operator
    - predicate: ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583))) (type: boolean)
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
                  Select Operator
                    expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
                    outputColumnNames: _col0, _col1, _col2

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
    index c53f0c4..be58a2b 100644
    --- a/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vector_mr_diff_schema_alias.q.out
    @@ -323,7 +323,7 @@ STAGE PLANS:
                      outputColumnNames: _col0, _col22, _col26, _col50, _col58
                      Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                      Filter Operator
    - predicate: (((_col0 = _col58) and (_col22 = _col26)) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
    + predicate: ((_col0 = _col58) and (_col22 = _col26) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
                        Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                        Select Operator
                          expressions: _col50 (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    index 9bd62ad..cfbe9ce 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    @@ -994,7 +994,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 like '%b%') or ((79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble))) (type: boolean)
    + predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
    index 687add6..6214640 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_13.q.out
    @@ -86,7 +86,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > 11.0) and ((UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
    + predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                          Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
    @@ -339,7 +339,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > -1.388) and ((UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
    + predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                          Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out b/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
    index 456768e..1858cb0 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_15.q.out
    @@ -82,7 +82,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 like '%ss%') or ((cstring1 like '10%') or ((cint >= -75) and ((UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))))) (type: boolean)
    + predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_17.q.out b/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
    index b5c71a4..1719176 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_17.q.out
    @@ -67,7 +67,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or ((UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)))) (type: boolean)
    + predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                          Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
    index b4003ef..61cd932 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_7.q.out
    @@ -73,7 +73,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
    + predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
                          Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
    @@ -262,7 +262,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
    + predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
                          Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_8.q.out b/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
    index 096aca9..3267860 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_8.q.out
    @@ -69,7 +69,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
    + predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                          Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
    @@ -245,7 +245,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
    + predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                          Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    index 9729a8f..5e0d42c 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    @@ -152,7 +152,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((762 = cbigint) or (((UDFToFloat(csmallint) < cfloat) and ((UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint)))) or ((cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and ((cstring2 <> 'a') and ((79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))))))) (type: boolean)
    + predicate: ((762 = cbigint) or ((UDFToFloat(csmallint) < cfloat) and (UDFToDouble(ctimestamp2) > -5.0) and (cdouble <> UDFToDouble(cint))) or (cstring1 = 'a') or ((UDFToDouble(cbigint) <= -1.389) and (cstring2 <> 'a') and (79.553 <> UDFToDouble(cint)) and (cboolean2 <> cboolean1))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cdouble (type: double), csmallint (type: smallint), cfloat (type: float), ctinyint (type: tinyint)
    @@ -364,7 +364,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or (((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or (((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))))) (type: boolean)
    + predicate: (((cbigint <= 197) and (UDFToLong(cint) < cbigint)) or ((cdouble >= -26.28) and (UDFToDouble(csmallint) > cdouble)) or ((UDFToFloat(ctinyint) > cfloat) and cstring1 regexp '.*ss.*') or ((cfloat > 79.553) and (cstring2 like '10%'))) (type: boolean)
                          Statistics: Num rows: 6826 Data size: 1467614 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cbigint (type: bigint), csmallint (type: smallint), cdouble (type: double), ctinyint (type: tinyint)
    @@ -567,7 +567,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((ctimestamp1 = ctimestamp2) or ((762.0 = cfloat) or ((cstring1 = 'ss') or (((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and (ctimestamp2 is not null and (cstring2 > 'a'))))))) (type: boolean)
    + predicate: ((ctimestamp1 = ctimestamp2) or (762.0 = cfloat) or (cstring1 = 'ss') or ((UDFToLong(csmallint) <= cbigint) and (1 = cboolean2)) or (cboolean1 is not null and ctimestamp2 is not null and (cstring2 > 'a'))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cbigint (type: bigint), ctinyint (type: tinyint), csmallint (type: smallint), cint (type: int), cdouble (type: double)
    @@ -749,7 +749,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((ctimestamp2 <= ctimestamp1) and ((UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1))) or (((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0))) (type: boolean)
    + predicate: (((ctimestamp2 <= ctimestamp1) and (UDFToDouble(cbigint) <> cdouble) and ('ss' <= cstring1)) or ((csmallint < UDFToShort(ctinyint)) and (UDFToDouble(ctimestamp1) >= 0.0)) or (cfloat = 17.0)) (type: boolean)
                          Statistics: Num rows: 8874 Data size: 1907941 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctinyint (type: tinyint), cbigint (type: bigint), cint (type: int), cfloat (type: float)
    @@ -939,7 +939,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or (((1 <> cboolean2) and ((UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint)))) or (((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))))) (type: boolean)
    + predicate: ((cstring1 regexp 'a.*' and (cstring2 like '%ss%')) or ((1 <> cboolean2) and (UDFToDouble(csmallint) < 79.553) and (-257 <> UDFToInteger(ctinyint))) or ((cdouble > UDFToDouble(ctinyint)) and (cfloat >= UDFToFloat(cint))) or ((UDFToLong(cint) < cbigint) and (UDFToLong(ctinyint) > cbigint))) (type: boolean)
                          Statistics: Num rows: 9898 Data size: 2128105 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cdouble (type: double), ctimestamp2 (type: timestamp), cstring1 (type: string), cboolean2 (type: boolean), ctinyint (type: tinyint), cfloat (type: float), ctimestamp1 (type: timestamp), csmallint (type: smallint), cbigint (type: bigint), (-3728 * cbigint) (type: bigint), (- cint) (type: int), (-863.257 - UDFToDouble(cint)) (type: double), (- csmallint) (type: smallint), (csmallint - (- csmallint)) (type: smallint), ((csmallint - (- csmallint)) + (- csmallint)) (type: smallint), (UDFToDouble(cint) / UDFToDouble(cint)) (type: double), ((-863.257 - UDFToDouble(cint)) - -26.28) (type: double), (- cfloat) (type: float), (cdouble * -89010.0) (type: double), (UDFToDouble(ctinyint) / 988888.0) (type: double), (- ctinyint) (type: tinyint), (79.553 / UDFToDouble(ctinyint)) (type: double)
    @@ -1197,7 +1197,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or ((cbigint = 359) or ((cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))))) (type: boolean)
    + predicate: (((197.0 > UDFToDouble(ctinyint)) and (UDFToLong(cint) = cbigint)) or (cbigint = 359) or (cboolean1 < 0) or ((cstring1 like '%ss') and (cfloat <= UDFToFloat(ctinyint)))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cbigint (type: bigint), cstring1 (type: string), cboolean1 (type: boolean), cfloat (type: float), cdouble (type: double), ctimestamp2 (type: timestamp), csmallint (type: smallint), cstring2 (type: string), cboolean2 (type: boolean), (UDFToDouble(cint) / UDFToDouble(cbigint)) (type: double), (UDFToDouble(cbigint) % 79.553) (type: double), (- (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (10.175 % UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cfloat - (- cfloat)) (type: float), ((cfloat - (- cfloat)) % -6432.0) (type: float), (cdouble * UDFToDouble(csmallint)) (type: double), (- cdouble) (type: double), (- cbigint) (type: bigint), (UDFToDouble(cfloat) - (UDFToDouble(cint) / UDFToDouble(cbigint))) (type: double), (- csmallint) (type: smallint), (3569 % cbigint) (type: bigint), (359.0 - cdouble) (type: double), (- csmallint) (type: smallint)
    @@ -1404,7 +1404,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or (((cdouble <= UDFToDouble(cbigint)) and ((cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble))) or ((UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))))) (type: boolean)
    + predicate: (((UDFToDouble(csmallint) > -26.28) and (cstring2 like 'ss')) or ((cdouble <= UDFToDouble(cbigint)) and (cstring1 >= 'ss') and (UDFToDouble(cint) <> cdouble)) or (UDFToInteger(ctinyint) = -89010) or ((UDFToFloat(cbigint) <= cfloat) and (-26.28 <= UDFToDouble(csmallint)))) (type: boolean)
                          Statistics: Num rows: 10922 Data size: 2348269 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cint (type: int), cstring1 (type: string), cboolean2 (type: boolean), ctimestamp2 (type: timestamp), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), cboolean1 (type: boolean), (cint + UDFToInteger(csmallint)) (type: int), (cbigint - UDFToLong(ctinyint)) (type: bigint), (- cbigint) (type: bigint), (- cfloat) (type: float), ((cbigint - UDFToLong(ctinyint)) + cbigint) (type: bigint), (cdouble / cdouble) (type: double), (- cdouble) (type: double), (UDFToLong((cint + UDFToInteger(csmallint))) * (- cbigint)) (type: bigint), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (-1.389 / UDFToDouble(ctinyint)) (type: double), (UDFToDouble(cbigint) % cdouble) (type: double), (- csmallint) (type: smallint), (UDFToInteger(csmallint) + (cint + UDFToInteger(csmallint))) (type: int)
    @@ -1670,7 +1670,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (((-1.389 >= UDFToDouble(cint)) and ((csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint)))) or (((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint))))) (type: boolean)
    + predicate: (((-1.389 >= UDFToDouble(cint)) and (csmallint < UDFToShort(ctinyint)) and (-6432 > UDFToInteger(csmallint))) or ((cdouble >= UDFToDouble(cfloat)) and (cstring2 <= 'a')) or ((cstring1 like 'ss%') and (10.175 > UDFToDouble(cbigint)))) (type: boolean)
                          Statistics: Num rows: 3868 Data size: 831633 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctimestamp1 (type: timestamp), cstring2 (type: string), cdouble (type: double), cfloat (type: float), cbigint (type: bigint), csmallint (type: smallint), (UDFToDouble(cbigint) / 3569.0) (type: double), (-257 - UDFToInteger(csmallint)) (type: int), (-6432.0 * cfloat) (type: float), (- cdouble) (type: double), (cdouble * 10.175) (type: double), (UDFToDouble((-6432.0 * cfloat)) / UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), (cint % UDFToInteger(csmallint)) (type: int), (- cdouble) (type: double), (cdouble * (- cdouble)) (type: double)
    @@ -2085,7 +2085,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and ((UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15))) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    + predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
                          Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cdouble (type: double), cfloat (type: float)
    @@ -2340,7 +2340,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and (cboolean2 is not null and (cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))))) or ((UDFToDouble(ctimestamp2) = -5.0) or (((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or ((cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint)))))))) (type: boolean)
    + predicate: ((UDFToDouble(ctimestamp1) <> 0.0) and (((-257 <> UDFToInteger(ctinyint)) and cboolean2 is not null and cstring1 regexp '.*ss' and (-3.0 < UDFToDouble(ctimestamp1))) or (UDFToDouble(ctimestamp2) = -5.0) or ((UDFToDouble(ctimestamp1) < 0.0) and (cstring2 like '%b%')) or (cdouble = UDFToDouble(cint)) or (cboolean1 is null and (cfloat < UDFToFloat(cint))))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: ctimestamp1 (type: timestamp), cstring1 (type: string), cint (type: int), csmallint (type: smallint), ctinyint (type: tinyint), cfloat (type: float), cdouble (type: double)
    @@ -2672,7 +2672,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and ((cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257))) or (((cint >= -257) and (cstring1 is not null and (cboolean1 >= 1))) or (cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))))) (type: boolean)
    + predicate: (cboolean1 is not null and (((cdouble < UDFToDouble(csmallint)) and (cboolean2 = cboolean1) and (UDFToDouble(cbigint) <= -863.257)) or ((cint >= -257) and cstring1 is not null and (cboolean1 >= 1)) or cstring2 regexp 'b' or ((csmallint >= UDFToShort(ctinyint)) and ctimestamp2 is null))) (type: boolean)
                          Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cboolean1 (type: boolean), cfloat (type: float), cbigint (type: bigint), cint (type: int), cdouble (type: double), ctinyint (type: tinyint), csmallint (type: smallint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    index 7f824f1..c2250e6 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorized_case.q.out
    @@ -45,7 +45,7 @@ STAGE PLANS:
              TableScan
                alias: alltypesorc
                Filter Operator
    - predicate: ((csmallint = 418) or ((csmallint = 12205) or (csmallint = 10583))) (type: boolean)
    + predicate: ((csmallint = 418) or (csmallint = 12205) or (csmallint = 10583)) (type: boolean)
                  Select Operator
                    expressions: csmallint (type: smallint), CASE WHEN ((csmallint = 418)) THEN ('a') WHEN ((csmallint = 12205)) THEN ('b') ELSE ('c') END (type: string), CASE (csmallint) WHEN (418) THEN ('a') WHEN (12205) THEN ('b') ELSE ('c') END (type: string)
                    outputColumnNames: _col0, _col1, _col2

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/udf_or.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/udf_or.q.out b/ql/src/test/results/clientpositive/udf_or.q.out
    index e03cbcd..cd3e2da 100644
    --- a/ql/src/test/results/clientpositive/udf_or.q.out
    +++ b/ql/src/test/results/clientpositive/udf_or.q.out
    @@ -2,9 +2,9 @@ PREHOOK: query: DESCRIBE FUNCTION or
      PREHOOK: type: DESCFUNCTION
      POSTHOOK: query: DESCRIBE FUNCTION or
      POSTHOOK: type: DESCFUNCTION
    -a or b - Logical or
    +a1 or a2 or ... or an - Logical or
      PREHOOK: query: DESCRIBE FUNCTION EXTENDED or
      PREHOOK: type: DESCFUNCTION
      POSTHOOK: query: DESCRIBE FUNCTION EXTENDED or
      POSTHOOK: type: DESCFUNCTION
    -a or b - Logical or
    +a1 or a2 or ... or an - Logical or

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
    index 4619403..288025d 100644
    --- a/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
    +++ b/ql/src/test/results/clientpositive/vector_mr_diff_schema_alias.q.out
    @@ -320,7 +320,7 @@ STAGE PLANS:
                outputColumnNames: _col0, _col22, _col26, _col50, _col58
                Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                Filter Operator
    - predicate: (((_col0 = _col58) and (_col22 = _col26)) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
    + predicate: ((_col0 = _col58) and (_col22 = _col26) and (_col50) IN ('KS', 'AL', 'MN', 'AL', 'SC', 'VT')) (type: boolean)
                  Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
                  Select Operator
                    expressions: _col50 (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_0.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_0.q.out b/ql/src/test/results/clientpositive/vectorization_0.q.out
    index 531bc84..3fab2ff 100644
    --- a/ql/src/test/results/clientpositive/vectorization_0.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_0.q.out
    @@ -1010,7 +1010,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cstring2 like '%b%') or ((79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble))) (type: boolean)
    + predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_13.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_13.q.out b/ql/src/test/results/clientpositive/vectorization_13.q.out
    index bfc8ad2..95cb09a 100644
    --- a/ql/src/test/results/clientpositive/vectorization_13.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_13.q.out
    @@ -81,7 +81,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > 11.0) and ((UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
    + predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > 11.0) and (UDFToDouble(ctimestamp2) <> 12.0) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                    Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)
    @@ -337,7 +337,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((cfloat < 3569.0) and ((10.175 >= cdouble) and (cboolean1 <> 1))) or ((UDFToDouble(ctimestamp1) > -1.388) and ((UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639)))) (type: boolean)
    + predicate: (((cfloat < 3569.0) and (10.175 >= cdouble) and (cboolean1 <> 1)) or ((UDFToDouble(ctimestamp1) > -1.388) and (UDFToDouble(ctimestamp2) <> -1.3359999999999999) and (UDFToDouble(ctinyint) < 9763215.5639))) (type: boolean)
                    Statistics: Num rows: 2730 Data size: 586959 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cboolean1 (type: boolean), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cfloat (type: float), cstring1 (type: string)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_15.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_15.q.out b/ql/src/test/results/clientpositive/vectorization_15.q.out
    index 0031e94..da0e8e0 100644
    --- a/ql/src/test/results/clientpositive/vectorization_15.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_15.q.out
    @@ -77,7 +77,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cstring2 like '%ss%') or ((cstring1 like '10%') or ((cint >= -75) and ((UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))))) (type: boolean)
    + predicate: ((cstring2 like '%ss%') or (cstring1 like '10%') or ((cint >= -75) and (UDFToShort(ctinyint) = csmallint) and (cdouble >= -3728.0))) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cfloat (type: float), cboolean1 (type: boolean), cdouble (type: double), cstring1 (type: string), ctinyint (type: tinyint), cint (type: int), ctimestamp1 (type: timestamp)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_17.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_17.q.out b/ql/src/test/results/clientpositive/vectorization_17.q.out
    index ece918c..3d58e68 100644
    --- a/ql/src/test/results/clientpositive/vectorization_17.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_17.q.out
    @@ -62,7 +62,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or ((UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble)))) (type: boolean)
    + predicate: (((cbigint > -23) and ((cdouble <> 988888.0) or (UDFToDouble(cint) > -863.257))) and ((ctinyint >= 33) or (UDFToLong(csmallint) >= cbigint) or (UDFToDouble(cfloat) = cdouble))) (type: boolean)
                    Statistics: Num rows: 4778 Data size: 1027287 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cfloat (type: float), cstring1 (type: string), cint (type: int), ctimestamp1 (type: timestamp), cdouble (type: double), cbigint (type: bigint), (UDFToDouble(cfloat) / UDFToDouble(ctinyint)) (type: double), (UDFToLong(cint) % cbigint) (type: bigint), (- cdouble) (type: double), (cdouble + (UDFToDouble(cfloat) / UDFToDouble(ctinyint))) (type: double), (cdouble / UDFToDouble(cint)) (type: double), (- (- cdouble)) (type: double), (9763215.5639 % UDFToDouble(cbigint)) (type: double), (2563.58 + (- (- cdouble))) (type: double)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_7.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_7.q.out b/ql/src/test/results/clientpositive/vectorization_7.q.out
    index d4252a7..6e2a0ea 100644
    --- a/ql/src/test/results/clientpositive/vectorization_7.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_7.q.out
    @@ -68,7 +68,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
    + predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > -15.0) and (3569.0 >= cdouble)))) (type: boolean)
                    Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)
    @@ -250,7 +250,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or ((UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss')))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
    + predicate: (((ctinyint <> 0) and ((UDFToDouble(ctimestamp1) <= 0.0) or (UDFToInteger(ctinyint) = cint) or (cstring2 like 'ss'))) and ((988888.0 < cdouble) or ((UDFToDouble(ctimestamp2) > 7.6850000000000005) and (3569.0 >= cdouble)))) (type: boolean)
                    Statistics: Num rows: 7281 Data size: 1565441 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cboolean1 (type: boolean), cbigint (type: bigint), csmallint (type: smallint), ctinyint (type: tinyint), ctimestamp1 (type: timestamp), cstring1 (type: string), (cbigint + cbigint) (type: bigint), (UDFToInteger(csmallint) % -257) (type: int), (- csmallint) (type: smallint), (- ctinyint) (type: tinyint), (UDFToInteger((- ctinyint)) + 17) (type: int), (cbigint * UDFToLong((- csmallint))) (type: bigint), (cint % UDFToInteger(csmallint)) (type: int), (- ctinyint) (type: tinyint), ((- ctinyint) % ctinyint) (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/7f3e4811/ql/src/test/results/clientpositive/vectorization_8.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_8.q.out b/ql/src/test/results/clientpositive/vectorization_8.q.out
    index c4ff1bc..c38fad1 100644
    --- a/ql/src/test/results/clientpositive/vectorization_8.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_8.q.out
    @@ -64,7 +64,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
    + predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 10.0) and (UDFToDouble(ctimestamp2) <> 16.0)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                    Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
    @@ -233,7 +233,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cstring2 is not null and ((UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998))) or ((cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0)))) (type: boolean)
    + predicate: ((cstring2 is not null and (UDFToDouble(ctimestamp1) <= 12.503) and (UDFToDouble(ctimestamp2) <> 11.998)) or (cfloat < -6432.0) or (cboolean1 is not null and (cdouble = 988888.0))) (type: boolean)
                    Statistics: Num rows: 9216 Data size: 1981473 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ctimestamp1 (type: timestamp), cdouble (type: double), cboolean1 (type: boolean), cstring1 (type: string), cfloat (type: float), (- cdouble) (type: double), (-5638.15 - cdouble) (type: double), (cdouble * -257.0) (type: double), (UDFToFloat(cint) + cfloat) (type: float), ((- cdouble) + UDFToDouble(cbigint)) (type: double), (- cdouble) (type: double), (-1.389 - UDFToDouble(cfloat)) (type: double), (- cfloat) (type: float), ((-5638.15 - cdouble) + UDFToDouble((UDFToFloat(cint) + cfloat))) (type: double)
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11498: HIVE Authorization v2 should not check permission for dummy entity (Dapeng Sun via Dong Chen)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/70631bb4
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/70631bb4
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/70631bb4

    Branch: refs/heads/hbase-metastore
    Commit: 70631bb4cff0c0cbd7055e843e091bfd4fae8e4e
    Parents: 7f3e481
    Author: Dapeng Sun <sdp@apache.org>
    Authored: Tue Aug 11 00:56:13 2015 -0400
    Committer: Dong Chen <dong1.chen@intel.com>
    Committed: Tue Aug 11 01:37:16 2015 -0400

    ----------------------------------------------------------------------
      ql/src/java/org/apache/hadoop/hive/ql/Driver.java | 5 ++++-
      .../queries/clientpositive/authorization_1_sql_std.q | 4 ++++
      .../results/clientpositive/authorization_1_sql_std.q.out | 11 +++++++++++
      3 files changed, 19 insertions(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/70631bb4/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    index cc85f31..e7b7b55 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    @@ -787,7 +787,10 @@ public class Driver implements CommandProcessor {
          for(Entity privObject : privObjects){
            HivePrivilegeObjectType privObjType =
                AuthorizationUtils.getHivePrivilegeObjectType(privObject.getType());
    -
    + if(privObject.isDummy()) {
    + //do not authorize dummy readEntity or writeEntity
    + continue;
    + }
            if(privObject instanceof ReadEntity && !((ReadEntity)privObject).isDirect()){
              // In case of views, the underlying views or tables are not direct dependencies
              // and are not used for authorization checks.

    http://git-wip-us.apache.org/repos/asf/hive/blob/70631bb4/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/authorization_1_sql_std.q b/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
    index 82896a4..b7b6710 100644
    --- a/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
    +++ b/ql/src/test/queries/clientpositive/authorization_1_sql_std.q
    @@ -6,6 +6,10 @@ set user.name=hive_admin_user;
      create table src_autho_test (key STRING, value STRING) ;

      set hive.security.authorization.enabled=true;
    +
    +--select dummy table
    +select 1;
    +
      set role ADMIN;
      --table grant to user


    http://git-wip-us.apache.org/repos/asf/hive/blob/70631bb4/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out b/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
    index 44c2fbd..2315fd4 100644
    --- a/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
    +++ b/ql/src/test/results/clientpositive/authorization_1_sql_std.q.out
    @@ -6,6 +6,17 @@ POSTHOOK: query: create table src_autho_test (key STRING, value STRING)
      POSTHOOK: type: CREATETABLE
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@src_autho_test
    +PREHOOK: query: --select dummy table
    +select 1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: _dummy_database@_dummy_table
    +#### A masked pattern was here ####
    +POSTHOOK: query: --select dummy table
    +select 1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: _dummy_database@_dummy_table
    +#### A masked pattern was here ####
    +1
      PREHOOK: query: set role ADMIN
      PREHOOK: type: SHOW_ROLES
      POSTHOOK: query: set role ADMIN
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11442: Remove commons-configuration.jar from Hive distribution


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/c4ceefb4
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/c4ceefb4
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/c4ceefb4

    Branch: refs/heads/hbase-metastore
    Commit: c4ceefb4c7a5e17780e43acbeabdcca872bef3ae
    Parents: df138f2
    Author: Daniel Dai <daijy@hortonworks.com>
    Authored: Wed Aug 12 10:12:02 2015 -0700
    Committer: Daniel Dai <daijy@hortonworks.com>
    Committed: Wed Aug 12 10:12:59 2015 -0700

    ----------------------------------------------------------------------
      jdbc/pom.xml | 1 +
      packaging/src/main/assembly/bin.xml | 3 ++-
      2 files changed, 3 insertions(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/c4ceefb4/jdbc/pom.xml
    ----------------------------------------------------------------------
    diff --git a/jdbc/pom.xml b/jdbc/pom.xml
    index 4fee22c..371d709 100644
    --- a/jdbc/pom.xml
    +++ b/jdbc/pom.xml
    @@ -189,6 +189,7 @@
                    <artifactSet>
                      <excludes>
                        <exclude>org.apache.commons:commons-compress</exclude>
    + <exclude>commons-configuration:commons-configuration</exclude>
                        <exclude>org.apache.hadoop:*</exclude>
                        <exclude>org.apache.hive:hive-ant</exclude>
                        <exclude>org.apache.ant:*</exclude>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c4ceefb4/packaging/src/main/assembly/bin.xml
    ----------------------------------------------------------------------
    diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
    index a1c176f..63253c5 100644
    --- a/packaging/src/main/assembly/bin.xml
    +++ b/packaging/src/main/assembly/bin.xml
    @@ -41,7 +41,8 @@
            <excludes>
              <exclude>org.apache.hadoop:*</exclude>
              <exclude>org.apache.hive.hcatalog:*</exclude>
    - <exclude>org.slf4j:*</exclude>
    + <exclude>org.slf4j:*</exclude>
    + <exclude>commons-configuration:commons-configuration</exclude>
            </excludes>
          </dependencySet>
          <dependencySet>
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11340 - Create ORC based table using like clause doesn't copy compression property (Yongzhi Chen, reviewed by Chao Sun)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/57ba795c
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/57ba795c
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/57ba795c

    Branch: refs/heads/hbase-metastore
    Commit: 57ba795cbf98f275b7bae75669d8769aa35d9ee5
    Parents: cfe9e48
    Author: Yongzhi Chen <yongzhi_chen@hotmail.com>
    Authored: Tue Aug 11 09:58:30 2015 -0700
    Committer: Chao Sun <sunchao@apache.org>
    Committed: Tue Aug 11 09:58:30 2015 -0700

    ----------------------------------------------------------------------
      .../apache/hadoop/hive/ql/io/orc/OrcConf.java | 2 +-
      .../apache/hadoop/hive/ql/io/orc/OrcSerde.java | 6 +-
      .../test/queries/clientpositive/create_like.q | 12 ++++
      .../results/clientpositive/create_like.q.out | 66 ++++++++++++++++++++
      4 files changed, 84 insertions(+), 2 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
    index 81b822f..132889c 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcConf.java
    @@ -133,7 +133,7 @@ public enum OrcConf {
        private String lookupValue(Properties tbl, Configuration conf) {
          String result = null;
          if (tbl != null) {
    - result = conf.get(attribute);
    + result = tbl.getProperty(attribute);
          }
          if (result == null && conf != null) {
            result = conf.get(attribute);
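    For context, the hunk above is the core of HIVE-11340: the table-level property was previously ignored because the table branch also read from the Configuration. Below is a minimal, self-contained sketch of the corrected lookup order; the wrapper class and hard-coded attribute are illustrative only, while the method body mirrors the patched code.

        import java.util.Properties;
        import org.apache.hadoop.conf.Configuration;

        public class OrcPropertyLookupSketch {
          private static final String ATTRIBUTE = "orc.compress";

          // Table properties (tblproperties) take precedence; the Configuration
          // is only consulted when the table does not define the attribute.
          static String lookupValue(Properties tbl, Configuration conf) {
            String result = null;
            if (tbl != null) {
              result = tbl.getProperty(ATTRIBUTE);
            }
            if (result == null && conf != null) {
              result = conf.get(ATTRIBUTE);
            }
            return result;
          }

          public static void main(String[] args) {
            Properties tbl = new Properties();
            tbl.setProperty(ATTRIBUTE, "SNAPPY");
            Configuration conf = new Configuration();
            conf.set(ATTRIBUTE, "ZLIB");
            System.out.println(lookupValue(tbl, conf)); // SNAPPY, not ZLIB
          }
        }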

    http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
    index a381443..8beff4b 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcSerde.java
    @@ -22,6 +22,7 @@ import java.io.DataOutput;
      import java.io.IOException;
      import java.util.ArrayList;
      import java.util.Properties;
    +
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
      import org.apache.hadoop.conf.Configuration;
    @@ -42,7 +43,7 @@ import org.apache.hadoop.io.Writable;
       * A serde class for ORC.
       * It transparently passes the object to/from the ORC file reader/writer.
       */
    -@SerDeSpec(schemaProps = {serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES})
    +@SerDeSpec(schemaProps = {serdeConstants.LIST_COLUMNS, serdeConstants.LIST_COLUMN_TYPES, OrcSerde.COMPRESSION})
      public class OrcSerde implements SerDe, VectorizedSerde {

        private static final Log LOG = LogFactory.getLog(OrcSerde.class);
    @@ -51,6 +52,7 @@ public class OrcSerde implements SerDe, VectorizedSerde {
        private ObjectInspector inspector = null;

        private VectorizedOrcSerde vos = null;
    + public static final String COMPRESSION = "orc.compress";

        final class OrcSerdeRow implements Writable {
          Object realRow;
    @@ -82,6 +84,8 @@ public class OrcSerde implements SerDe, VectorizedSerde {
          // NOTE: if "columns.types" is missing, all columns will be of String type
          String columnTypeProperty = table.getProperty(serdeConstants.LIST_COLUMN_TYPES);

    + String compressType = OrcConf.COMPRESS.getString(table, conf);
    +
          // Parse the configuration parameters
          ArrayList<String> columnNames = new ArrayList<String>();
          if (columnNameProperty != null && columnNameProperty.length() > 0) {
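    The serde side of the same fix: "orc.compress" is now declared as a schema property and read during initialization, which is what lets a table created with CREATE TABLE ... LIKE carry the compression setting over. A hedged, plain-Java sketch of that flow; a java.util.Properties object stands in for the table metadata and the wrapper class is illustrative.

        import java.util.Properties;

        public class OrcCompressionPropertySketch {
          // Mirrors the constant added to OrcSerde in the hunk above.
          public static final String COMPRESSION = "orc.compress";

          public static void main(String[] args) {
            Properties table = new Properties();
            table.setProperty(COMPRESSION, "SNAPPY"); // from tblproperties in the DDL
            // During initialize(), the serde can now pick the value up from the
            // table properties (falling back to the Configuration, per OrcConf).
            String compressType = table.getProperty(COMPRESSION);
            System.out.println("compression = " + compressType); // SNAPPY
          }
        }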

    http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/test/queries/clientpositive/create_like.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/create_like.q b/ql/src/test/queries/clientpositive/create_like.q
    index 3b04702..bd39731 100644
    --- a/ql/src/test/queries/clientpositive/create_like.q
    +++ b/ql/src/test/queries/clientpositive/create_like.q
    @@ -83,3 +83,15 @@ DESCRIBE FORMATTED table6;

      drop table table5;

    +create table orc_table (
    +time string)
    +stored as ORC tblproperties ("orc.compress"="SNAPPY");
    +
    +create table orc_table_using_like like orc_table;
    +
    +describe formatted orc_table_using_like;
    +
    +drop table orc_table_using_like;
    +
    +drop table orc_table;
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/57ba795c/ql/src/test/results/clientpositive/create_like.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/create_like.q.out b/ql/src/test/results/clientpositive/create_like.q.out
    index c93b134..a373178 100644
    --- a/ql/src/test/results/clientpositive/create_like.q.out
    +++ b/ql/src/test/results/clientpositive/create_like.q.out
    @@ -579,3 +579,69 @@ POSTHOOK: query: drop table table5
      POSTHOOK: type: DROPTABLE
      POSTHOOK: Input: default@table5
      POSTHOOK: Output: default@table5
    +PREHOOK: query: create table orc_table (
    +time string)
    +stored as ORC tblproperties ("orc.compress"="SNAPPY")
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@orc_table
    +POSTHOOK: query: create table orc_table (
    +time string)
    +stored as ORC tblproperties ("orc.compress"="SNAPPY")
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@orc_table
    +PREHOOK: query: create table orc_table_using_like like orc_table
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@orc_table_using_like
    +POSTHOOK: query: create table orc_table_using_like like orc_table
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@orc_table_using_like
    +PREHOOK: query: describe formatted orc_table_using_like
    +PREHOOK: type: DESCTABLE
    +PREHOOK: Input: default@orc_table_using_like
    +POSTHOOK: query: describe formatted orc_table_using_like
    +POSTHOOK: type: DESCTABLE
    +POSTHOOK: Input: default@orc_table_using_like
    +# col_name data_type comment
    +
    +time string
    +
    +# Detailed Table Information
    +Database: default
    +#### A masked pattern was here ####
    +Retention: 0
    +#### A masked pattern was here ####
    +Table Type: MANAGED_TABLE
    +Table Parameters:
    + orc.compress SNAPPY
    +#### A masked pattern was here ####
    +
    +# Storage Information
    +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde
    +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat
    +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat
    +Compressed: No
    +Num Buckets: -1
    +Bucket Columns: []
    +Sort Columns: []
    +Storage Desc Params:
    + serialization.format 1
    +PREHOOK: query: drop table orc_table_using_like
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@orc_table_using_like
    +PREHOOK: Output: default@orc_table_using_like
    +POSTHOOK: query: drop table orc_table_using_like
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@orc_table_using_like
    +POSTHOOK: Output: default@orc_table_using_like
    +PREHOOK: query: drop table orc_table
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@orc_table
    +PREHOOK: Output: default@orc_table
    +POSTHOOK: query: drop table orc_table
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@orc_table
    +POSTHOOK: Output: default@orc_table
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11511 Output the message of orcfiledump when ORC files are not specified (Shinichi Yamashita via gates)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cfe9e484
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cfe9e484
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cfe9e484

    Branch: refs/heads/hbase-metastore
    Commit: cfe9e484f8624b590a728d758099c1fd5d069672
    Parents: 7e53685
    Author: Alan Gates <gates@hortonworks.com>
    Authored: Tue Aug 11 09:42:20 2015 -0700
    Committer: Alan Gates <gates@hortonworks.com>
    Committed: Tue Aug 11 09:42:20 2015 -0700

    ----------------------------------------------------------------------
      ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java | 4 ++++
      1 file changed, 4 insertions(+)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/cfe9e484/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
    index cbbec36..4acb810 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/FileDump.java
    @@ -82,6 +82,10 @@ public final class FileDump {
          boolean printTimeZone = cli.hasOption('t');
          boolean jsonFormat = cli.hasOption('j');
          String[] files = cli.getArgs();
    + if (files.length == 0) {
    + System.err.println("Error : ORC files are not specified");
    + return;
    + }
          if (dumpData) {
            printData(Arrays.asList(files), conf);
          } else {
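    A standalone sketch of the guard added above; the class and main method are hypothetical (FileDump itself goes through a CLI parser), but the behavior is the same: with no file arguments, the tool now reports an error instead of exiting silently.

        public class FileDumpGuardSketch {
          public static void main(String[] args) {
            if (args.length == 0) {
              System.err.println("Error : ORC files are not specified");
              return;
            }
            // ... otherwise, dump metadata or data for each listed ORC file ...
          }
        }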
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11449: "Capacity must be a power of two" error when HybridHashTableContainer memory threshold is too low (Jason Dere, reviewed by Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/2ee30c48
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/2ee30c48
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/2ee30c48

    Branch: refs/heads/hbase-metastore
    Commit: 2ee30c4859cd3427f0c74af536657d149cbad361
    Parents: 763cb02
    Author: Jason Dere <jdere@hortonworks.com>
    Authored: Tue Aug 11 17:07:47 2015 -0700
    Committer: Jason Dere <jdere@hortonworks.com>
    Committed: Tue Aug 11 17:07:47 2015 -0700

    ----------------------------------------------------------------------
      .../hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java | 2 ++
      1 file changed, 2 insertions(+)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/2ee30c48/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
    index 0a6461f..ad1246d 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java
    @@ -118,6 +118,8 @@ public class HybridHashTableContainer
          public HashPartition(int threshold, float loadFactor, int wbSize, long memUsage,
                               boolean createHashMap) {
            if (createHashMap) {
    + // Hash map should be at least the size of our designated wbSize
    + memUsage = Math.max(memUsage, wbSize);
              hashMap = new BytesBytesMultiHashMap(threshold, loadFactor, wbSize, memUsage);
            } else {
              hashMapSpilledOnCreation = true;
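    The two-line fix above clamps the memory budget: when the configured threshold is smaller than the write-buffer size, the capacity derived for the underlying hash map can trip the "Capacity must be a power of two" check named in the issue title. A minimal sketch of the clamp; the method wrapper is illustrative, the arithmetic mirrors the hunk.

        public class HashPartitionMemorySketch {
          // The hash map should get at least the size of the designated write
          // buffer, so the memory budget is raised to wbSize when it is lower.
          static long effectiveMemUsage(long memUsage, int wbSize) {
            return Math.max(memUsage, wbSize);
          }

          public static void main(String[] args) {
            System.out.println(effectiveMemUsage(1024L, 8 * 1024 * 1024)); // 8388608
          }
        }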
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11464: lineage info missing if there are multiple outputs (Jimmy)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/1a75644d
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/1a75644d
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/1a75644d

    Branch: refs/heads/hbase-metastore
    Commit: 1a75644d68c8c61fbafb4058fe45b7823492491c
    Parents: f26b256
    Author: Jimmy Xiang <jxiang@cloudera.com>
    Authored: Wed Aug 5 08:02:50 2015 -0700
    Committer: Jimmy Xiang <jxiang@cloudera.com>
    Committed: Thu Aug 13 13:44:03 2015 -0700

    ----------------------------------------------------------------------
      .../java/org/apache/hadoop/hive/ql/Driver.java | 8 ++--
      .../hadoop/hive/ql/hooks/LineageInfo.java | 9 ++--
      .../hadoop/hive/ql/hooks/LineageLogger.java | 44 +++++++++++++-------
      .../ql/optimizer/lineage/ExprProcFactory.java | 9 ++--
      .../hive/ql/optimizer/lineage/LineageCtx.java | 34 +++++++++++----
      .../ql/optimizer/lineage/OpProcFactory.java | 10 ++---
      ql/src/test/queries/clientpositive/lineage3.q | 15 +++++++
      .../test/results/clientpositive/lineage3.q.out | 32 +++++++++++++-
      8 files changed, 118 insertions(+), 43 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    index e7b7b55..c0c1b2e 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    @@ -441,8 +441,11 @@ public class Driver implements CommandProcessor {
            // to avoid returning sensitive data
            String queryStr = HookUtils.redactLogString(conf, command);

    + // get the output schema
    + schema = getSchema(sem, conf);
    +
            plan = new QueryPlan(queryStr, sem, perfLogger.getStartTime(PerfLogger.DRIVER_RUN), queryId,
    - SessionState.get().getHiveOperation(), getSchema(sem, conf));
    + SessionState.get().getHiveOperation(), schema);

            conf.setVar(HiveConf.ConfVars.HIVEQUERYSTRING, queryStr);

    @@ -454,9 +457,6 @@ public class Driver implements CommandProcessor {
              plan.getFetchTask().initialize(conf, plan, null);
            }

    - // get the output schema
    - schema = getSchema(sem, conf);
    -
            //do the authorization check
            if (!sem.skipAuthorization() &&
                HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
    index fe0841e..2806c54 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageInfo.java
    @@ -22,7 +22,6 @@ import java.io.Serializable;
      import java.util.Collections;
      import java.util.LinkedHashMap;
      import java.util.LinkedHashSet;
    -import java.util.List;
      import java.util.Map;
      import java.util.Set;

    @@ -375,9 +374,9 @@ public class LineageInfo implements Serializable {
          private String expr;

          /**
    - * The list of base columns that the particular column depends on.
    + * The set of base columns that the particular column depends on.
           */
    - private List<BaseColumnInfo> baseCols;
    + private Set<BaseColumnInfo> baseCols;

          /**
           * @return the type
    @@ -410,14 +409,14 @@ public class LineageInfo implements Serializable {
          /**
           * @return the baseCols
           */
    - public List<BaseColumnInfo> getBaseCols() {
    + public Set<BaseColumnInfo> getBaseCols() {
            return baseCols;
          }

          /**
           * @param baseCols the baseCols to set
           */
    - public void setBaseCols(List<BaseColumnInfo> baseCols) {
    + public void setBaseCols(Set<BaseColumnInfo> baseCols) {
            this.baseCols = baseCols;
          }
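    The type change above (List to Set for baseCols) matters once a query has several outputs: the same base column can be reached through more than one final select operator, and a LinkedHashSet de-duplicates it while keeping a stable order. A small illustration with String stand-ins for BaseColumnInfo; the column names are taken from the test tables but the scenario is hypothetical.

        import java.util.LinkedHashSet;
        import java.util.Set;

        public class BaseColsDedupSketch {
          public static void main(String[] args) {
            Set<String> baseCols = new LinkedHashSet<>();
            baseCols.add("default.alltypesorc.ctinyint");
            baseCols.add("default.alltypesorc.ctinyint"); // reached via a second insert target
            baseCols.add("default.alltypesorc.cstring1");
            // [default.alltypesorc.ctinyint, default.alltypesorc.cstring1]
            System.out.println(baseCols);
          }
        }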


    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    index d615372..3c6ce94 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/LineageLogger.java
    @@ -33,6 +33,7 @@ import org.apache.commons.io.output.StringBuilderWriter;
      import org.apache.commons.lang.StringUtils;
      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
    +import org.apache.hadoop.hive.common.ObjectPair;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.metastore.api.FieldSchema;
      import org.apache.hadoop.hive.metastore.api.Table;
    @@ -147,6 +148,7 @@ public class LineageLogger implements ExecuteWithHookContext {
                // Don't emit user/timestamp info in test mode,
                // so that the test golden output file is fixed.
                long queryTime = plan.getQueryStartTime().longValue();
    + if (queryTime == 0) queryTime = System.currentTimeMillis();
                writer.name("user").value(hookContext.getUgi().getUserName());
                writer.name("timestamp").value(queryTime/1000);
                writer.name("jobIds");
    @@ -209,23 +211,28 @@ public class LineageLogger implements ExecuteWithHookContext {
         * For each target column, find out its sources based on the dependency index.
         */
        private List<Edge> getEdges(QueryPlan plan, Index index) {
    - List<FieldSchema> fieldSchemas = plan.getResultSchema().getFieldSchemas();
    - int fields = fieldSchemas == null ? 0 : fieldSchemas.size();
    - SelectOperator finalSelOp = index.getFinalSelectOp();
    + LinkedHashMap<String, ObjectPair<SelectOperator,
    + org.apache.hadoop.hive.ql.metadata.Table>> finalSelOps = index.getFinalSelectOps();
    + Set<Vertex> allTargets = new LinkedHashSet<Vertex>();
    + Map<String, Vertex> allSources = new LinkedHashMap<String, Vertex>();
          List<Edge> edges = new ArrayList<Edge>();
    - if (finalSelOp != null && fields > 0) {
    - Map<ColumnInfo, Dependency> colMap = index.getDependencies(finalSelOp);
    - List<Dependency> dependencies = colMap != null ? Lists.newArrayList(colMap.values()) : null;
    - if (dependencies == null || dependencies.size() != fields) {
    - log("Result schema has " + fields
    - + " fields, but we don't get as many dependencies");
    + for (ObjectPair<SelectOperator,
    + org.apache.hadoop.hive.ql.metadata.Table> pair: finalSelOps.values()) {
    + List<FieldSchema> fieldSchemas = plan.getResultSchema().getFieldSchemas();
    + SelectOperator finalSelOp = pair.getFirst();
    + org.apache.hadoop.hive.ql.metadata.Table t = pair.getSecond();
    + String destTableName = null;
    + List<String> colNames = null;
    + if (t != null) {
    + destTableName = t.getDbName() + "." + t.getTableName();
    + fieldSchemas = t.getCols();
            } else {
    - String destTableName = null;
    - List<String> colNames = null;
              // Based on the plan outputs, find out the target table name and column names.
              for (WriteEntity output : plan.getOutputs()) {
    - if (output.getType() == Entity.Type.TABLE) {
    - org.apache.hadoop.hive.ql.metadata.Table t = output.getTable();
    + Entity.Type entityType = output.getType();
    + if (entityType == Entity.Type.TABLE
    + || entityType == Entity.Type.PARTITION) {
    + t = output.getTable();
                  destTableName = t.getDbName() + "." + t.getTableName();
                  List<FieldSchema> cols = t.getCols();
                  if (cols != null && !cols.isEmpty()) {
    @@ -234,10 +241,15 @@ public class LineageLogger implements ExecuteWithHookContext {
                  break;
                }
              }
    -
    + }
    + int fields = fieldSchemas.size();
    + Map<ColumnInfo, Dependency> colMap = index.getDependencies(finalSelOp);
    + List<Dependency> dependencies = colMap != null ? Lists.newArrayList(colMap.values()) : null;
    + if (dependencies == null || dependencies.size() != fields) {
    + log("Result schema has " + fields
    + + " fields, but we don't get as many dependencies");
    + } else {
              // Go through each target column, generate the lineage edges.
    - Set<Vertex> allTargets = new LinkedHashSet<Vertex>();
    - Map<String, Vertex> allSources = new LinkedHashMap<String, Vertex>();
              for (int i = 0; i < fields; i++) {
                Vertex target = new Vertex(
                  getTargetFieldName(i, destTableName, colNames, fieldSchemas));
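    For readers skimming the refactor above: getEdges() now loops over every final select operator and accumulates edges into one shared set of sources and targets, instead of handling a single operator. A heavily simplified sketch of that shape, with Strings standing in for operators, tables, and columns; all names here are hypothetical.

        import java.util.ArrayList;
        import java.util.LinkedHashMap;
        import java.util.LinkedHashSet;
        import java.util.List;
        import java.util.Map;
        import java.util.Set;

        public class MultiOutputLineageSketch {
          public static void main(String[] args) {
            // Stand-in for index.getFinalSelectOps(): one entry per insert target.
            Map<String, String> finalSelOps = new LinkedHashMap<>();
            finalSelOps.put("SEL_1", "default.d1");
            finalSelOps.put("SEL_2", "default.d2");

            // Shared across all targets, as in the refactored getEdges().
            Set<String> allTargets = new LinkedHashSet<>();
            Map<String, String> allSources = new LinkedHashMap<>();
            List<String> edges = new ArrayList<>();

            for (Map.Entry<String, String> op : finalSelOps.entrySet()) {
              String source = "default.alltypesorc.ctinyint"; // stand-in base column
              allSources.putIfAbsent(source, source);
              String target = op.getValue() + ".a";
              allTargets.add(target);
              edges.add(source + " -> " + target);
            }
            edges.forEach(System.out::println);
          }
        }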

    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
    index 455a525..38040e3 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/ExprProcFactory.java
    @@ -24,6 +24,7 @@ import java.util.LinkedHashMap;
      import java.util.LinkedHashSet;
      import java.util.List;
      import java.util.Map;
    +import java.util.Set;
      import java.util.Stack;

      import org.apache.hadoop.hive.metastore.api.FieldSchema;
    @@ -124,7 +125,7 @@ public class ExprProcFactory {
              bci_set.addAll(child_dep.getBaseCols());
            }

    - dep.setBaseCols(new ArrayList<BaseColumnInfo>(bci_set));
    + dep.setBaseCols(bci_set);
            dep.setType(new_type);

            return dep;
    @@ -146,7 +147,7 @@ public class ExprProcFactory {
            // Create a dependency that has no basecols
            Dependency dep = new Dependency();
            dep.setType(LineageInfo.DependencyType.SIMPLE);
    - dep.setBaseCols(new ArrayList<BaseColumnInfo>());
    + dep.setBaseCols(new LinkedHashSet<BaseColumnInfo>());

            return dep;
          }
    @@ -218,9 +219,9 @@ public class ExprProcFactory {
            Dependency dep = lctx.getIndex().getDependency(inpOp, internalName);
            if ((tabAlias == null || tabAlias.startsWith("_") || tabAlias.startsWith("$"))
                && (dep != null && dep.getType() == DependencyType.SIMPLE)) {
    - List<BaseColumnInfo> baseCols = dep.getBaseCols();
    + Set<BaseColumnInfo> baseCols = dep.getBaseCols();
              if (baseCols != null && !baseCols.isEmpty()) {
    - BaseColumnInfo baseCol = baseCols.get(0);
    + BaseColumnInfo baseCol = baseCols.iterator().next();
                tabAlias = baseCol.getTabAlias().getAlias();
                alias = baseCol.getColumn().getName();
              }

    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
    index d26d8da..c33d775 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/LineageCtx.java
    @@ -25,7 +25,9 @@ import java.util.LinkedHashSet;
      import java.util.Map;
      import java.util.Set;

    +import org.apache.hadoop.hive.common.ObjectPair;
      import org.apache.hadoop.hive.ql.exec.ColumnInfo;
    +import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
      import org.apache.hadoop.hive.ql.exec.Operator;
      import org.apache.hadoop.hive.ql.exec.SelectOperator;
      import org.apache.hadoop.hive.ql.hooks.LineageInfo;
    @@ -33,6 +35,7 @@ import org.apache.hadoop.hive.ql.hooks.LineageInfo.BaseColumnInfo;
      import org.apache.hadoop.hive.ql.hooks.LineageInfo.Dependency;
      import org.apache.hadoop.hive.ql.hooks.LineageInfo.Predicate;
      import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx;
    +import org.apache.hadoop.hive.ql.metadata.Table;
      import org.apache.hadoop.hive.ql.parse.ParseContext;
      import org.apache.hadoop.hive.ql.plan.OperatorDesc;

    @@ -59,7 +62,11 @@ public class LineageCtx implements NodeProcessorCtx {
           */
          private final Map<Operator<? extends OperatorDesc>, Set<Predicate>> condMap;

    - private SelectOperator finalSelectOp;
    + /**
    + * A map from a final select operator id to the select operator
    + * and the corresponding target table in case an insert into query.
    + */
    + private LinkedHashMap<String, ObjectPair<SelectOperator, Table>> finalSelectOps;

          /**
           * Constructor.
    @@ -69,6 +76,8 @@ public class LineageCtx implements NodeProcessorCtx {
              new LinkedHashMap<Operator<? extends OperatorDesc>,
                                LinkedHashMap<ColumnInfo, Dependency>>();
            condMap = new HashMap<Operator<? extends OperatorDesc>, Set<Predicate>>();
    + finalSelectOps =
    + new LinkedHashMap<String, ObjectPair<SelectOperator, Table>>();
          }

          /**
    @@ -146,7 +155,7 @@ public class LineageCtx implements NodeProcessorCtx {
              old_dep.setType(new_type);
              Set<BaseColumnInfo> bci_set = new LinkedHashSet<BaseColumnInfo>(old_dep.getBaseCols());
              bci_set.addAll(dep.getBaseCols());
    - old_dep.setBaseCols(new ArrayList<BaseColumnInfo>(bci_set));
    + old_dep.setBaseCols(bci_set);
              // TODO: Fix the expressions later.
              old_dep.setExpr(null);
            }
    @@ -179,16 +188,27 @@ public class LineageCtx implements NodeProcessorCtx {
            return condMap.get(op);
          }

    - public void setFinalSelectOp(SelectOperator sop) {
    - finalSelectOp = sop;
    + public void addFinalSelectOp(
    + SelectOperator sop, Operator<? extends OperatorDesc> sinkOp) {
    + String operatorId = sop.getOperatorId();
    + if (!finalSelectOps.containsKey(operatorId)) {
    + Table table = null;
    + if (sinkOp instanceof FileSinkOperator) {
    + FileSinkOperator fso = (FileSinkOperator) sinkOp;
    + table = fso.getConf().getTable();
    + }
    + finalSelectOps.put(operatorId,
    + new ObjectPair<SelectOperator, Table>(sop, table));
    + }
          }

    - public SelectOperator getFinalSelectOp() {
    - return finalSelectOp;
    + public LinkedHashMap<String,
    + ObjectPair<SelectOperator, Table>> getFinalSelectOps() {
    + return finalSelectOps;
          }

          public void clear() {
    - finalSelectOp = null;
    + finalSelectOps.clear();
            depMap.clear();
          }
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
    index f670db8..5c5d0d6 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/lineage/OpProcFactory.java
    @@ -120,7 +120,7 @@ public class OpProcFactory {
            }

            dep.setType(new_type);
    - dep.setBaseCols(new ArrayList<BaseColumnInfo>(col_set));
    + dep.setBaseCols(col_set);

            boolean isScript = op instanceof ScriptOperator;

    @@ -186,7 +186,7 @@ public class OpProcFactory {

              // Populate the dependency
              dep.setType(LineageInfo.DependencyType.SIMPLE);
    - dep.setBaseCols(new ArrayList<BaseColumnInfo>());
    + dep.setBaseCols(new LinkedHashSet<BaseColumnInfo>());
              dep.getBaseCols().add(bci);

              // Put the dependency in the map
    @@ -396,7 +396,7 @@ public class OpProcFactory {
            }
            if (op == null || (op.getChildOperators().isEmpty()
                && op instanceof FileSinkOperator)) {
    - lctx.getIndex().setFinalSelectOp(sop);
    + lctx.getIndex().addFinalSelectOp(sop, op);
            }

            return null;
    @@ -450,7 +450,7 @@ public class OpProcFactory {
                  new_type = LineageCtx.getNewDependencyType(expr_dep.getType(), new_type);
                  bci_set.addAll(expr_dep.getBaseCols());
                  if (expr_dep.getType() == LineageInfo.DependencyType.SIMPLE) {
    - BaseColumnInfo col = expr_dep.getBaseCols().get(0);
    + BaseColumnInfo col = expr_dep.getBaseCols().iterator().next();
                    Table t = col.getTabAlias().getTable();
                    if (t != null) {
                      sb.append(t.getDbName()).append(".").append(t.getTableName()).append(".");
    @@ -514,7 +514,7 @@ public class OpProcFactory {
                }
              }

    - dep.setBaseCols(new ArrayList<BaseColumnInfo>(bci_set));
    + dep.setBaseCols(bci_set);
              dep.setType(new_type);
              lctx.getIndex().putDependency(gop, col_infos.get(cnt++), dep);
            }

    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/test/queries/clientpositive/lineage3.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/lineage3.q b/ql/src/test/queries/clientpositive/lineage3.q
    index 53fff0f..c24ff7d 100644
    --- a/ql/src/test/queries/clientpositive/lineage3.q
    +++ b/ql/src/test/queries/clientpositive/lineage3.q
    @@ -1,5 +1,20 @@
      set hive.exec.post.hooks=org.apache.hadoop.hive.ql.hooks.LineageLogger;

    +drop table if exists d1;
    +create table d1(a int);
    +
    +from (select a.ctinyint x, b.cstring1 y
    +from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
    +insert into table d1 select x + length(y);
    +
    +drop table if exists d2;
    +create table d2(b varchar(128));
    +
    +from (select a.ctinyint x, b.cstring1 y
    +from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
    +insert into table d1 select x where y is null
    +insert into table d2 select y where x > 0;
    +
      drop table if exists t;
      create table t as
      select * from

    http://git-wip-us.apache.org/repos/asf/hive/blob/1a75644d/ql/src/test/results/clientpositive/lineage3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/lineage3.q.out b/ql/src/test/results/clientpositive/lineage3.q.out
    index 75d88f8..b6b4e0b 100644
    --- a/ql/src/test/results/clientpositive/lineage3.q.out
    +++ b/ql/src/test/results/clientpositive/lineage3.q.out
    @@ -1,3 +1,31 @@
    +PREHOOK: query: drop table if exists d1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: query: create table d1(a int)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@d1
    +PREHOOK: query: from (select a.ctinyint x, b.cstring1 y
    +from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
    +insert into table d1 select x + length(y)
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@alltypesorc
    +PREHOOK: Output: default@d1
    +{"version":"1.0","engine":"mr","hash":"4c9b7b8d89403cef78668f15d393e542","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x + length(y)","edges":[{"sources":[1,2],"targets":[0],"expression":"(UDFToInteger(a.ctinyint) + length(a.cstring1))","edgeType":"PROJECTION"},{"sources":[3,4],"targets":[0],"expression":"(UDFToLong(a.cint) = a.cbigint)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
    +PREHOOK: query: drop table if exists d2
    +PREHOOK: type: DROPTABLE
    +PREHOOK: query: create table d2(b varchar(128))
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@d2
    +PREHOOK: query: from (select a.ctinyint x, b.cstring1 y
    +from alltypesorc a join alltypesorc b on a.cint = b.cbigint) t
    +insert into table d1 select x where y is null
    +insert into table d2 select y where x > 0
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@alltypesorc
    +PREHOOK: Output: default@d1
    +PREHOOK: Output: default@d2
    +{"version":"1.0","engine":"mr","hash":"8703e4091ebd4c96afd3cac83e3a2957","queryText":"from (select a.ctinyint x, b.cstring1 y\nfrom alltypesorc a join alltypesorc b on a.cint = b.cbigint) t\ninsert into table d1 select x where y is null\ninsert into table d2 select y where x > 0","edges":[{"sources":[2],"targets":[0],"expression":"UDFToInteger(x)","edgeType":"PROJECTION"},{"sources":[3],"targets":[0,1],"expression":"t.y is null","edgeType":"PREDICATE"},{"sources":[4,5],"targets":[0,1],"expression":"(UDFToLong(a.cint) = b.cbigint)","edgeType":"PREDICATE"},{"sources":[3],"targets":[1],"expression":"CAST( y AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(t.x > 0)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.d1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.d2.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.ctinyint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.
      cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
      PREHOOK: query: drop table if exists t
      PREHOOK: type: DROPTABLE
      PREHOOK: query: create table t as
    @@ -23,7 +51,7 @@ where cint is not null and cint < 0 order by cint, cs limit 5
      PREHOOK: type: QUERY
      PREHOOK: Input: default@alltypesorc
      PREHOOK: Output: default@dest_l1@ds=today
    -{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"cint"},{"id":1,"vertexType":"COLUMN","vertexId":"cs"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
    +{"version":"1.0","engine":"mr","hash":"2b5891d094ff74e23ec6acf5b4990f45","queryText":"insert into table dest_l1 partition (ds='today')\nselect cint, cast(cstring1 as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cint < 0 order by cint, cs limit 5","edges":[{"sources":[2],"targets":[0],"edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( alltypesorc.cstring1 AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cint < 0))","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"}]}
      PREHOOK: query: insert into table dest_l1 partition (ds='tomorrow')
      select min(cint), cast(min(cstring1) as varchar(128)) as cs
      from alltypesorc
    @@ -33,7 +61,7 @@ having min(cbigint) > 10
      PREHOOK: type: QUERY
      PREHOOK: Input: default@alltypesorc
      PREHOOK: Output: default@dest_l1@ds=tomorrow
    -{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"c0"},{"id":1,"vertexType":"COLUMN","vertexId":"cs"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},{"id":3,"vertexType":"COLUMN",
      "vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
    +{"version":"1.0","engine":"mr","hash":"4ad6338a8abfe3fe0342198fcbd1f11d","queryText":"insert into table dest_l1 partition (ds='tomorrow')\nselect min(cint), cast(min(cstring1) as varchar(128)) as cs\nfrom alltypesorc\nwhere cint is not null and cboolean1 = true\ngroup by csmallint\nhaving min(cbigint) > 10","edges":[{"sources":[2],"targets":[0],"expression":"min(default.alltypesorc.cint)","edgeType":"PROJECTION"},{"sources":[3],"targets":[1],"expression":"CAST( min(default.alltypesorc.cstring1) AS varchar(128))","edgeType":"PROJECTION"},{"sources":[2,4],"targets":[0,1],"expression":"(alltypesorc.cint is not null and (alltypesorc.cboolean1 = true))","edgeType":"PREDICATE"},{"sources":[5],"targets":[0,1],"expression":"(min(default.alltypesorc.cbigint) > 10)","edgeType":"PREDICATE"}],"vertices":[{"id":0,"vertexType":"COLUMN","vertexId":"default.dest_l1.a"},{"id":1,"vertexType":"COLUMN","vertexId":"default.dest_l1.b"},{"id":2,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cint"},
      {"id":3,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cstring1"},{"id":4,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cboolean1"},{"id":5,"vertexType":"COLUMN","vertexId":"default.alltypesorc.cbigint"}]}
      PREHOOK: query: select cint, rank() over(order by cint) from alltypesorc
      where cint > 10 and cint < 10000 limit 10
      PREHOOK: type: QUERY
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11493: Predicate with integer column equals double evaluates to false (Pengcheng Xiong, reviewed by Hari Sankar Sivarama Subramaniyan)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/b8f1ae11
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/b8f1ae11
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/b8f1ae11

    Branch: refs/heads/hbase-metastore
    Commit: b8f1ae110616a8fe162f79140c785f76be76fc67
    Parents: 6e76291
    Author: Pengcheng Xiong <pxiong@apache.org>
    Authored: Thu Aug 13 21:01:54 2015 -0700
    Committer: Pengcheng Xiong <pxiong@apache.org>
    Committed: Thu Aug 13 21:02:40 2015 -0700

    ----------------------------------------------------------------------
      .../hive/ql/parse/TypeCheckProcFactory.java | 2 +-
      .../clientpositive/cast_tinyint_to_double.q | 7 ++++
      .../clientpositive/cast_tinyint_to_double.q.out | 38 ++++++++++++++++++++
      .../clientpositive/infer_const_type.q.out | 7 ++--
      .../clientpositive/spark/vectorization_0.q.out | 2 +-
      .../spark/vectorization_short_regress.q.out | 20 +++++------
      .../clientpositive/tez/vectorization_0.q.out | 2 +-
      .../tez/vectorization_short_regress.q.out | 20 +++++------
      .../clientpositive/vectorization_0.q.out | 2 +-
      .../vectorization_short_regress.q.out | 20 +++++------
      10 files changed, 84 insertions(+), 36 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    index cd68f4e..ab5d006 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java
    @@ -1034,7 +1034,7 @@ public class TypeCheckProcFactory {
                    // we'll try again to convert it to double
                    // however, if we already tried this, or the column is NUMBER type and
                    // the operator is EQUAL, return false due to the type mismatch
    - if (triedDouble ||
    + if (triedDouble &&
                        (genericUDF instanceof GenericUDFOPEqual
                        && !columnType.equals(serdeConstants.STRING_TYPE_NAME))) {
                      return new ExprNodeConstantDesc(false);
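    The one-character change above is the crux of HIVE-11493: with ||, an equality against a non-string column (for example c = 10.0 on a tinyint) was folded to constant FALSE before the double retry was even attempted; with &&, folding only happens once the retry has already been made. A simplified sketch of the condition, with booleans standing in for the expression-level checks and a hypothetical helper name.

        public class FoldToFalseSketch {
          // before: triedDouble || (isEqual && !isStringColumn)  -- folded too eagerly
          // after : triedDouble && (isEqual && !isStringColumn)
          static boolean foldToFalse(boolean triedDouble, boolean isEqual, boolean isStringColumn) {
            return triedDouble && (isEqual && !isStringColumn);
          }

          public static void main(String[] args) {
            // First pass over tinyint = 10.0: no longer folded, double retry proceeds.
            System.out.println(foldToFalse(false, true, false)); // false
            // Retry already attempted on an equality over a non-string column: folded.
            System.out.println(foldToFalse(true, true, false));  // true
          }
        }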

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q b/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
    new file mode 100644
    index 0000000..59c5e89
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cast_tinyint_to_double.q
    @@ -0,0 +1,7 @@
    +drop table t;
    +CREATE TABLE t(c tinyint);
    +insert overwrite table t select 10 from src limit 1;
    +
    +select * from t where c = 10.0;
    +
    +select * from t where c = -10.0;
    \ No newline at end of file

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out b/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out
    new file mode 100644
    index 0000000..c29df65
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cast_tinyint_to_double.q.out
    @@ -0,0 +1,38 @@
    +PREHOOK: query: drop table t
    +PREHOOK: type: DROPTABLE
    +POSTHOOK: query: drop table t
    +POSTHOOK: type: DROPTABLE
    +PREHOOK: query: CREATE TABLE t(c tinyint)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@t
    +POSTHOOK: query: CREATE TABLE t(c tinyint)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@t
    +PREHOOK: query: insert overwrite table t select 10 from src limit 1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@t
    +POSTHOOK: query: insert overwrite table t select 10 from src limit 1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@t
    +POSTHOOK: Lineage: t.c EXPRESSION []
    +PREHOOK: query: select * from t where c = 10.0
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@t
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from t where c = 10.0
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@t
    +#### A masked pattern was here ####
    +10
    +PREHOOK: query: select * from t where c = -10.0
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@t
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from t where c = -10.0
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@t
    +#### A masked pattern was here ####

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/infer_const_type.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/infer_const_type.q.out b/ql/src/test/results/clientpositive/infer_const_type.q.out
    index 6368e4a..05c6a45 100644
    --- a/ql/src/test/results/clientpositive/infer_const_type.q.out
    +++ b/ql/src/test/results/clientpositive/infer_const_type.q.out
    @@ -102,6 +102,7 @@ POSTHOOK: type: QUERY
      POSTHOOK: Input: default@infertypes
      #### A masked pattern was here ####
      127 32767 12345 -12345 906.0 -307.0 1234
    +WARNING: Comparing a bigint and a double may result in a loss of precision.
      PREHOOK: query: -- all should return false as all numbers exceeed the largest number
      -- which could be represented by the corresponding type
      -- and string_col = long_const should return false
    @@ -136,7 +137,7 @@ STAGE PLANS:
                  alias: infertypes
                  Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: false (type: boolean)
    + predicate: ((UDFToDouble(ti) = 128.0) or (UDFToDouble(si) = 32768.0) or (UDFToDouble(i) = 2.147483648E9) or (UDFToDouble(bi) = 9.223372036854776E18)) (type: boolean)
                    Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string)
    @@ -156,6 +157,7 @@ STAGE PLANS:
            Processor Tree:
              ListSink

    +WARNING: Comparing a bigint and a double may result in a loss of precision.
      PREHOOK: query: SELECT * FROM infertypes WHERE
        ti = '128' OR
        si = 32768 OR
    @@ -200,7 +202,7 @@ STAGE PLANS:
                  alias: infertypes
                  Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: false (type: boolean)
    + predicate: ((UDFToDouble(ti) = 127.0) or (UDFToDouble(si) = 327.0) or (UDFToDouble(i) = -100.0)) (type: boolean)
                    Statistics: Num rows: 1 Data size: 117 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: ti (type: tinyint), si (type: smallint), i (type: int), bi (type: bigint), fl (type: float), db (type: double), str (type: string)
    @@ -234,6 +236,7 @@ POSTHOOK: query: SELECT * FROM infertypes WHERE
      POSTHOOK: type: QUERY
      POSTHOOK: Input: default@infertypes
      #### A masked pattern was here ####
    +127 32767 12345 -12345 906.0 -307.0 1234
      PREHOOK: query: EXPLAIN SELECT * FROM infertypes WHERE
        ti < '127.0' AND
        i > '100.0' AND

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    index 90bcc1b..3ad059c 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_0.q.out
    @@ -994,7 +994,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
    + predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569.0 = UDFToDouble(ctinyint)))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    index 6bd1bb2..3d17aba 100644
    --- a/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    +++ b/ql/src/test/results/clientpositive/spark/vectorization_short_regress.q.out
    @@ -2085,23 +2085,23 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 377237 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    - Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
    + predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or (2563.58 = UDFToDouble(ctinyint)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    + Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cdouble (type: double), cfloat (type: float)
                            outputColumnNames: _col0, _col1
    - Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                            Group By Operator
                              aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
                              keys: _col0 (type: double)
                              mode: hash
                              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
    - Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                              Reduce Output Operator
                                key expressions: _col0 (type: double)
                                sort order: +
                                Map-reduce partition columns: _col0 (type: double)
    - Statistics: Num rows: 606 Data size: 18603 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 81476 Basic stats: COMPLETE Column stats: NONE
                                value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
                  Execution mode: vectorized
              Reducer 2
    @@ -2111,25 +2111,25 @@ STAGE PLANS:
                      keys: KEY._col0 (type: double)
                      mode: mergepartial
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
    - Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                      Select Operator
                        expressions: _col0 (type: double), _col1 (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double)
                        outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
    - Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: double)
                          sort order: +
    - Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                          value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
              Reducer 3
                  Reduce Operator Tree:
                    Select Operator
                      expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
    - Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
    - Statistics: Num rows: 303 Data size: 9301 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 40738 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    index cfbe9ce..18e042d 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_0.q.out
    @@ -994,7 +994,7 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
    + predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569.0 = UDFToDouble(ctinyint)))) (type: boolean)
                          Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    index 5e0d42c..59b457a 100644
    --- a/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    +++ b/ql/src/test/results/clientpositive/tez/vectorization_short_regress.q.out
    @@ -2085,23 +2085,23 @@ STAGE PLANS:
                        alias: alltypesorc
                        Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                        Filter Operator
    - predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or (2563.58 = UDFToDouble(ctinyint)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                          Select Operator
                            expressions: cdouble (type: double), cfloat (type: float)
                            outputColumnNames: _col0, _col1
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                            Group By Operator
                              aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
                              keys: _col0 (type: double)
                              mode: hash
                              outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                              Reduce Output Operator
                                key expressions: _col0 (type: double)
                                sort order: +
                                Map-reduce partition columns: _col0 (type: double)
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                                value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
                  Execution mode: vectorized
              Reducer 2
    @@ -2111,25 +2111,25 @@ STAGE PLANS:
                      keys: KEY._col0 (type: double)
                      mode: mergepartial
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                      Select Operator
                        expressions: _col0 (type: double), _col1 (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double)
                        outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: double)
                          sort order: +
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                          value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
              Reducer 3
                  Reduce Operator Tree:
                    Select Operator
                      expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
                      outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                      File Output Operator
                        compressed: false
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                        table:
                            input format: org.apache.hadoop.mapred.TextInputFormat
                            output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/vectorization_0.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_0.q.out b/ql/src/test/results/clientpositive/vectorization_0.q.out
    index 3fab2ff..89163cd 100644
    --- a/ql/src/test/results/clientpositive/vectorization_0.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_0.q.out
    @@ -1010,7 +1010,7 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble)) (type: boolean)
    + predicate: ((cstring2 like '%b%') or (79.553 <> UDFToDouble(cint)) or (UDFToDouble(cbigint) < cdouble) or ((UDFToShort(ctinyint) >= csmallint) and (cboolean2 = 1) and (3569.0 = UDFToDouble(ctinyint)))) (type: boolean)
                    Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cbigint (type: bigint), cfloat (type: float), ctinyint (type: tinyint)

    http://git-wip-us.apache.org/repos/asf/hive/blob/b8f1ae11/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    index f2cb3ec..728f628 100644
    --- a/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    +++ b/ql/src/test/results/clientpositive/vectorization_short_regress.q.out
    @@ -2031,23 +2031,23 @@ STAGE PLANS:
                  alias: alltypesorc
                  Statistics: Num rows: 12288 Data size: 2641964 Basic stats: COMPLETE Column stats: NONE
                  Filter Operator
    - predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + predicate: ((cdouble > 2563.58) and (((cbigint >= UDFToLong(cint)) and (UDFToInteger(csmallint) < cint) and (UDFToDouble(cfloat) < -5638.15)) or (2563.58 = UDFToDouble(ctinyint)) or ((cdouble <= UDFToDouble(cbigint)) and (-5638.15 > UDFToDouble(cbigint))))) (type: boolean)
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                    Select Operator
                      expressions: cdouble (type: double), cfloat (type: float)
                      outputColumnNames: _col0, _col1
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                      Group By Operator
                        aggregations: var_samp(_col0), count(_col1), sum(_col1), var_pop(_col0), stddev_pop(_col0), sum(_col0)
                        keys: _col0 (type: double)
                        mode: hash
                        outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                        Reduce Output Operator
                          key expressions: _col0 (type: double)
                          sort order: +
                          Map-reduce partition columns: _col0 (type: double)
    - Statistics: Num rows: 606 Data size: 130292 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 2654 Data size: 570619 Basic stats: COMPLETE Column stats: NONE
                          value expressions: _col1 (type: struct<count:bigint,sum:double,variance:double>), _col2 (type: bigint), _col3 (type: double), _col4 (type: struct<count:bigint,sum:double,variance:double>), _col5 (type: struct<count:bigint,sum:double,variance:double>), _col6 (type: double)
            Execution mode: vectorized
            Reduce Operator Tree:
    @@ -2056,11 +2056,11 @@ STAGE PLANS:
                keys: KEY._col0 (type: double)
                mode: mergepartial
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                Select Operator
                  expressions: _col0 (type: double), _col1 (type: double), _col5 (type: double), (_col0 + _col1) (type: double), (_col0 * 762.0) (type: double), _col6 (type: double), (-863.257 % (_col0 * 762.0)) (type: double), (2563.58 * _col1) (type: double), (- _col1) (type: double), _col2 (type: bigint), ((2563.58 * _col1) + -5638.15) (type: double), ((- _col1) * ((2563.58 * _col1) + -5638.15)) (type: double), _col3 (type: double), _col4 (type: double), (_col0 - (- _col1)) (type: double)
                  outputColumnNames: _col0, _col1, _col10, _col11, _col12, _col13, _col14, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                  File Output Operator
                    compressed: false
                    table:
    @@ -2075,16 +2075,16 @@ STAGE PLANS:
                  Reduce Output Operator
                    key expressions: _col0 (type: double)
                    sort order: +
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                    value expressions: _col1 (type: double), _col2 (type: double), _col3 (type: double), _col4 (type: bigint), _col5 (type: double), _col6 (type: double), _col7 (type: double), _col8 (type: double), _col9 (type: double), _col10 (type: double), _col11 (type: double), _col12 (type: double), _col13 (type: double), _col14 (type: double)
            Reduce Operator Tree:
              Select Operator
                expressions: KEY.reducesinkkey0 (type: double), VALUE._col0 (type: double), VALUE._col1 (type: double), VALUE._col2 (type: double), VALUE._col3 (type: bigint), VALUE._col4 (type: double), VALUE._col5 (type: double), VALUE._col6 (type: double), VALUE._col7 (type: double), VALUE._col8 (type: double), VALUE._col9 (type: double), VALUE._col10 (type: double), VALUE._col11 (type: double), VALUE._col12 (type: double), VALUE._col13 (type: double), VALUE._col12 (type: double)
                outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                File Output Operator
                  compressed: false
    - Statistics: Num rows: 303 Data size: 65146 Basic stats: COMPLETE Column stats: NONE
    + Statistics: Num rows: 1327 Data size: 285309 Basic stats: COMPLETE Column stats: NONE
                  table:
                      input format: org.apache.hadoop.mapred.TextInputFormat
                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11562: Typo in hive-log4j2.xml throws unknown level exception (Prasanth Jayachandran reviewed by Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d307abbf
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d307abbf
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d307abbf

    Branch: refs/heads/hbase-metastore
    Commit: d307abbf1093ffdc2599489bd6f67bdb8dcb3e14
    Parents: c93d6c7
    Author: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Authored: Fri Aug 14 14:01:57 2015 -0700
    Committer: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Committed: Fri Aug 14 14:01:57 2015 -0700

    ----------------------------------------------------------------------
      data/conf/hive-log4j2.xml | 2 +-
      1 file changed, 1 insertion(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/d307abbf/data/conf/hive-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
    index c9adfa2..a40be2c 100644
    --- a/data/conf/hive-log4j2.xml
    +++ b/data/conf/hive-log4j2.xml
    @@ -70,7 +70,7 @@

        <Loggers>
          <Root level="${sys:hive.log.threshold}">
    - <AppenderRef ref="${sys:hive.root.logger}" level="{sys:hive.log.level}"/>
    + <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
            <AppenderRef ref="EventCounter" />
          </Root>
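    Editorial note on the one-character fix above: log4j2 only substitutes lookups written as "${sys:name}" from JVM system properties, so with the "$" dropped the literal text "{sys:hive.log.level}" was handed to the AppenderRef as a level name and rejected at startup, which is the unknown-level exception the commit message refers to. A minimal, hedged sketch of how the lookup is fed (the demo class name is made up; it assumes hive-log4j2.xml is the active configuration):

        import org.apache.logging.log4j.LogManager;
        import org.apache.logging.log4j.Logger;

        public class HiveLogLevelDemo {
          public static void main(String[] args) {
            // Hypothetical: set before log4j2 initializes so the ${sys:hive.log.level}
            // lookup in the config (if that file is the active configuration) sees it.
            System.setProperty("hive.log.level", "WARN");
            Logger log = LogManager.getLogger(HiveLogLevelDemo.class);
            log.warn("visible when the configured level is WARN");
            log.info("filtered out when the configured level is WARN");
          }
        }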
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11567: Some trace logs seeped through with new log4j2 changes (Prasanth Jayachandran reviewed by Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/cf0481fc
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/cf0481fc
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/cf0481fc

    Branch: refs/heads/hbase-metastore
    Commit: cf0481fcf26087dc2cd2de8b10bc2b13befa96ac
    Parents: 0fab86c
    Author: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Authored: Fri Aug 14 14:04:10 2015 -0700
    Committer: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Committed: Fri Aug 14 14:04:10 2015 -0700

    ----------------------------------------------------------------------
      beeline/src/main/resources/beeline-log4j2.xml | 2 +-
      common/src/test/resources/hive-exec-log4j2-test.xml | 2 +-
      common/src/test/resources/hive-log4j2-test.xml | 2 +-
      data/conf/hive-log4j2.xml | 2 +-
      hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml | 2 +-
      ql/src/main/resources/hive-exec-log4j2.xml | 2 +-
      testutils/ptest2/src/main/resources/log4j2.xml | 2 +-
      7 files changed, 7 insertions(+), 7 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/beeline/src/main/resources/beeline-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/beeline/src/main/resources/beeline-log4j2.xml b/beeline/src/main/resources/beeline-log4j2.xml
    index 5f09741..2349c5a 100644
    --- a/beeline/src/main/resources/beeline-log4j2.xml
    +++ b/beeline/src/main/resources/beeline-log4j2.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="hive.log.threshold">ALL</Property>
    + <Property name="hive.log.threshold">DEBUG</Property>
          <Property name="hive.log.level">WARN</Property>
          <Property name="hive.root.logger">console</Property>
        </Properties>

    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/common/src/test/resources/hive-exec-log4j2-test.xml
    ----------------------------------------------------------------------
    diff --git a/common/src/test/resources/hive-exec-log4j2-test.xml b/common/src/test/resources/hive-exec-log4j2-test.xml
    index b5f2cb4..1d91b26 100644
    --- a/common/src/test/resources/hive-exec-log4j2-test.xml
    +++ b/common/src/test/resources/hive-exec-log4j2-test.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="hive.log.threshold">ALL</Property>
    + <Property name="hive.log.threshold">DEBUG</Property>
          <Property name="hive.log.level">INFO</Property>
          <Property name="hive.root.logger">FA</Property>
          <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>

    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/common/src/test/resources/hive-log4j2-test.xml
    ----------------------------------------------------------------------
    diff --git a/common/src/test/resources/hive-log4j2-test.xml b/common/src/test/resources/hive-log4j2-test.xml
    index 63b46c8..98ca6f8 100644
    --- a/common/src/test/resources/hive-log4j2-test.xml
    +++ b/common/src/test/resources/hive-log4j2-test.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="hive.log.threshold">ALL</Property>
    + <Property name="hive.log.threshold">DEBUG</Property>
          <Property name="hive.log.level">WARN</Property>
          <Property name="hive.root.logger">DRFA</Property>
          <Property name="hive.log.dir">${sys:test.tmp.dir}/${sys:user.name}-TestHiveLogging</Property>

    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/data/conf/hive-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
    index ff8e4d3..51173a0 100644
    --- a/data/conf/hive-log4j2.xml
    +++ b/data/conf/hive-log4j2.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="hive.log.threshold">ALL</Property>
    + <Property name="hive.log.threshold">DEBUG</Property>
          <Property name="hive.log.level">DEBUG</Property>
          <Property name="hive.root.logger">DRFA</Property>
          <Property name="hive.log.dir">${sys:test.tmp.dir}/log</Property>

    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
    index 40da974..96f0974 100644
    --- a/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
    +++ b/hcatalog/webhcat/svr/src/main/config/webhcat-log4j2.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="webhcat.log.threshold">ALL</Property>
    + <Property name="webhcat.log.threshold">DEBUG</Property>
          <Property name="webhcat.log.level">INFO</Property>
          <Property name="webhcat.root.logger">standard</Property>
          <Property name="webhcat.log.dir">.</Property>

    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/ql/src/main/resources/hive-exec-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/ql/src/main/resources/hive-exec-log4j2.xml b/ql/src/main/resources/hive-exec-log4j2.xml
    index c93437c..8b520a2 100644
    --- a/ql/src/main/resources/hive-exec-log4j2.xml
    +++ b/ql/src/main/resources/hive-exec-log4j2.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="hive.log.threshold">ALL</Property>
    + <Property name="hive.log.threshold">DEBUG</Property>
          <Property name="hive.log.level">INFO</Property>
          <Property name="hive.root.logger">FA</Property>
          <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>

    http://git-wip-us.apache.org/repos/asf/hive/blob/cf0481fc/testutils/ptest2/src/main/resources/log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/testutils/ptest2/src/main/resources/log4j2.xml b/testutils/ptest2/src/main/resources/log4j2.xml
    index 992462e..6502ad1 100644
    --- a/testutils/ptest2/src/main/resources/log4j2.xml
    +++ b/testutils/ptest2/src/main/resources/log4j2.xml
    @@ -20,7 +20,7 @@
       packages="org.apache.hadoop.hive.ql.log">

        <Properties>
    - <Property name="hive.ptest.log.threshold">ALL</Property>
    + <Property name="hive.ptest.log.threshold">DEBUG</Property>
          <Property name="hive.ptest.log.level">DEBUG</Property>
          <Property name="hive.ptest.root.logger">FILE</Property>
          <Property name="hive.ptest.log.dir">target</Property>
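    Editorial note: each of the configs above lowers the root threshold property from ALL to DEBUG. With the threshold at ALL, TRACE events were still eligible to flow to the appenders, which is how the stray trace lines mentioned in the commit message slipped through; at DEBUG they are discarded at the root logger. A small hedged sketch (hypothetical class name; assumes one of the patched configs is in effect):

        import org.apache.logging.log4j.LogManager;
        import org.apache.logging.log4j.Logger;

        public class ThresholdDemo {
          public static void main(String[] args) {
            Logger log = LogManager.getLogger(ThresholdDemo.class);
            // Assuming a patched config is active: the threshold is now DEBUG,
            // so the TRACE event is dropped before it reaches any appender.
            log.trace("dropped once hive.log.threshold is DEBUG rather than ALL");
            log.debug("still emitted at the DEBUG threshold");
          }
        }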
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11563: Perflogger loglines are repeated (Prasanth Jayachandran reviewed by Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/0fab86c9
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/0fab86c9
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/0fab86c9

    Branch: refs/heads/hbase-metastore
    Commit: 0fab86c9d7730275cb9962c24ce9d2f92f0c7150
    Parents: d307abb
    Author: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Authored: Fri Aug 14 14:02:51 2015 -0700
    Committer: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Committed: Fri Aug 14 14:02:51 2015 -0700

    ----------------------------------------------------------------------
      data/conf/hive-log4j2.xml | 2 +-
      1 file changed, 1 insertion(+), 1 deletion(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/0fab86c9/data/conf/hive-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/data/conf/hive-log4j2.xml b/data/conf/hive-log4j2.xml
    index a40be2c..ff8e4d3 100644
    --- a/data/conf/hive-log4j2.xml
    +++ b/data/conf/hive-log4j2.xml
    @@ -95,7 +95,7 @@
          <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
            <AppenderRef ref="${sys:hive.root.logger}"/>
          </Logger>
    - <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}">
    + <Logger name="org.apache.hadoop.hive.ql.log.PerfLogger" level="${sys:hive.ql.log.PerfLogger.level}" additivity="false">
            <AppenderRef ref="${sys:hive.root.logger}"/>
          </Logger>
          <Logger name="org.apache.hadoop.hive.ql.exec.Operator" level="INFO">
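    Editorial note: in log4j2 a named logger forwards accepted events to its ancestors' appenders unless additivity is disabled, so a PerfLogger event was written once by the PerfLogger's own AppenderRef and again by the root logger's appender, producing the repeated log lines; additivity="false" keeps each event with the PerfLogger's appender only. A hedged illustration (demo class name is made up):

        import org.apache.logging.log4j.LogManager;
        import org.apache.logging.log4j.Logger;

        public class PerfLoggerAdditivityDemo {
          public static void main(String[] args) {
            Logger perfLog = LogManager.getLogger("org.apache.hadoop.hive.ql.log.PerfLogger");
            // With additivity left at its default (true), an event accepted here is also
            // forwarded to the root logger's appender, so the same line shows up twice;
            // the patched config's additivity="false" prevents that forwarding.
            perfLog.info("<PERFLOG method=compile>");
          }
        }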
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11538 : Add an option to skip init script while running tests (Ashutosh Chauhan via Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/6e762919
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/6e762919
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/6e762919

    Branch: refs/heads/hbase-metastore
    Commit: 6e7629193ed7b8714bd5ae5ab48bd10c2cbd85cf
    Parents: 433ea9c
    Author: Ashutosh Chauhan <hashutosh@apache.org>
    Authored: Thu Aug 13 20:42:33 2015 -0700
    Committer: Ashutosh Chauhan <hashutosh@apache.org>
    Committed: Thu Aug 13 20:43:46 2015 -0700

    ----------------------------------------------------------------------
      itests/qtest/pom.xml | 26 ++++----
      .../org/apache/hadoop/hive/ql/QTestUtil.java | 62 ++++++++++----------
      2 files changed, 44 insertions(+), 44 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/6e762919/itests/qtest/pom.xml
    ----------------------------------------------------------------------
    diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
    index 44d30da..0588233 100644
    --- a/itests/qtest/pom.xml
    +++ b/itests/qtest/pom.xml
    @@ -30,7 +30,7 @@

        <properties>
          <hive.path.to.root>../..</hive.path.to.root>
    -
    + <initScript>q_test_init.sql</initScript>
          <qfile></qfile>
          <qfile_regex></qfile_regex>
          <run_disabled>false</run_disabled>
    @@ -420,7 +420,7 @@
                        logFile="${project.build.directory}/testparseneggen.log"
                        hadoopVersion="${active.hadoop.version}"
                        logDirectory="${project.build.directory}/qfile-results/negative/"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- Cli -->
    @@ -437,7 +437,7 @@
                        logFile="${project.build.directory}/testclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                        hadoopVersion="${active.hadoop.version}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- Negative Cli -->
    @@ -454,7 +454,7 @@
                        logFile="${project.build.directory}/testnegativeclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/clientnegative/"
                        hadoopVersion="${active.hadoop.version}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- Compare Cli -->
    @@ -470,7 +470,7 @@
                        logFile="${project.build.directory}/testcompareclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/clientcompare/"
                        hadoopVersion="${active.hadoop.version}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- Minimr -->
    @@ -487,7 +487,7 @@
                        logFile="${project.build.directory}/testminimrclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                        hadoopVersion="${active.hadoop.version}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <if>
    @@ -508,7 +508,7 @@
                                    logFile="${project.build.directory}/testminitezclidrivergen.log"
                                    logDirectory="${project.build.directory}/qfile-results/clientpositive/"
                                    hadoopVersion="${active.hadoop.version}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                                    cleanupScript="q_test_cleanup.sql"/>

                          <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
    @@ -547,7 +547,7 @@
                        logFile="${project.build.directory}/testnegativeminimrclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/clientnegative/"
                        hadoopVersion="${hadoopVersion}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- HBase Positive -->
    @@ -561,7 +561,7 @@
                        resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/positive/" className="TestHBaseCliDriver"
                        logFile="${project.build.directory}/testhbaseclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/hbase-handler/positive/"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- HBase Minimr -->
    @@ -575,7 +575,7 @@
                        resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/positive/" className="TestHBaseMinimrCliDriver"
                        logFile="${project.build.directory}/testhbaseminimrclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/hbase-handler/minimrpositive/"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- HBase Negative -->
    @@ -589,7 +589,7 @@
                        resultsDirectory="${basedir}/${hive.path.to.root}/hbase-handler/src/test/results/negative/" className="TestHBaseNegativeCliDriver"
                        logFile="${project.build.directory}/testhbasenegativeclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/hbase-handler/negative"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <!-- Beeline -->
    @@ -626,7 +626,7 @@
                        logFile="${project.build.directory}/testcontribclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/contribclientpositive"
                        hadoopVersion="${hadoopVersion}"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>

                      <qtestgen hiveRootDirectory="${basedir}/${hive.path.to.root}/"
    @@ -639,7 +639,7 @@
                        resultsDirectory="${basedir}/${hive.path.to.root}/contrib/src/test/results/clientnegative/" className="TestContribNegativeCliDriver"
                        logFile="${project.build.directory}/testcontribnegclidrivergen.log"
                        logDirectory="${project.build.directory}/qfile-results/contribclientnegative"
    - initScript="q_test_init.sql"
    + initScript="${initScript}"
                        cleanupScript="q_test_cleanup.sql"/>



    http://git-wip-us.apache.org/repos/asf/hive/blob/6e762919/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    ----------------------------------------------------------------------
    diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    index 39d5d9e..3fae0ba 100644
    --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
    @@ -121,8 +121,8 @@ public class QTestUtil {

        private static final Log LOG = LogFactory.getLog("QTestUtil");
        private static final String QTEST_LEAVE_FILES = "QTEST_LEAVE_FILES";
    - private final String defaultInitScript = "q_test_init.sql";
    - private final String defaultCleanupScript = "q_test_cleanup.sql";
    + private final static String defaultInitScript = "q_test_init.sql";
    + private final static String defaultCleanupScript = "q_test_cleanup.sql";
        private final String[] testOnlyCommands = new String[]{"crypto"};

        private String testWarehouse;
    @@ -149,7 +149,6 @@ public class QTestUtil {
        private HadoopShims.MiniMrShim mr = null;
        private HadoopShims.MiniDFSShim dfs = null;
        private HadoopShims.HdfsEncryptionShim hes = null;
    - private boolean miniMr = false;
        private String hadoopVer = null;
        private QTestSetup setup = null;
        private SparkSession sparkSession = null;
    @@ -209,7 +208,7 @@ public class QTestUtil {
                continue;
              }

    - if (file.isDir()) {
    + if (file.isDirectory()) {
                if (!destFs.exists(local_path)) {
                  destFs.mkdirs(local_path);
                }
    @@ -410,14 +409,9 @@ public class QTestUtil {
          if (scriptsDir == null) {
            scriptsDir = new File(".").getAbsolutePath() + "/data/scripts";
          }
    - if (initScript.isEmpty()) {
    - initScript = defaultInitScript;
    - }
    - if (cleanupScript.isEmpty()) {
    - cleanupScript = defaultCleanupScript;
    - }
    - this.initScript = scriptsDir + "/" + initScript;
    - this.cleanupScript = scriptsDir + "/" + cleanupScript;
    +
    + this.initScript = scriptsDir + File.separator + initScript;
    + this.cleanupScript = scriptsDir + File.separator + cleanupScript;

          overWrite = "true".equalsIgnoreCase(System.getProperty("test.output.overwrite"));

    @@ -705,7 +699,7 @@ public class QTestUtil {
            FileSystem fileSystem = p.getFileSystem(conf);
            if (fileSystem.exists(p)) {
              for (FileStatus status : fileSystem.listStatus(p)) {
    - if (status.isDir() && !srcTables.contains(status.getPath().getName())) {
    + if (status.isDirectory() && !srcTables.contains(status.getPath().getName())) {
                  fileSystem.delete(status.getPath(), true);
                }
              }
    @@ -755,16 +749,19 @@ public class QTestUtil {
          clearTablesCreatedDuringTests();
          clearKeysCreatedInTests();

    - SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
    -
    - String cleanupCommands = readEntireFileIntoString(new File(cleanupScript));
    - LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
    - if(cliDriver == null) {
    - cliDriver = new CliDriver();
    + File cleanupFile = new File(cleanupScript);
    + if (cleanupFile.isFile()) {
    + String cleanupCommands = readEntireFileIntoString(cleanupFile);
    + LOG.info("Cleanup (" + cleanupScript + "):\n" + cleanupCommands);
    + if(cliDriver == null) {
    + cliDriver = new CliDriver();
    + }
    + SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", true);
    + cliDriver.processLine(cleanupCommands);
    + SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);
    + } else {
    + LOG.info("No cleanup script detected. Skipping.");
          }
    - cliDriver.processLine(cleanupCommands);
    -
    - SessionState.get().getConf().setBoolean("hive.test.shutdown.phase", false);

          // delete any contents in the warehouse dir
          Path p = new Path(testWarehouse);
    @@ -809,14 +806,21 @@ public class QTestUtil {
          if(!isSessionStateStarted) {
            startSessionState();
          }
    - conf.setBoolean("hive.test.init.phase", true);

    - String initCommands = readEntireFileIntoString(new File(this.initScript));
    - LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
          if(cliDriver == null) {
            cliDriver = new CliDriver();
          }
          cliDriver.processLine("set test.data.dir=" + testFiles + ";");
    + File scriptFile = new File(this.initScript);
    + if (!scriptFile.isFile()) {
    + LOG.info("No init script detected. Skipping");
    + return;
    + }
    + conf.setBoolean("hive.test.init.phase", true);
    +
    + String initCommands = readEntireFileIntoString(scriptFile);
    + LOG.info("Initial setup (" + initScript + "):\n" + initCommands);
    +
          cliDriver.processLine(initCommands);

          conf.setBoolean("hive.test.init.phase", false);
    @@ -912,6 +916,7 @@ public class QTestUtil {

        private CliSessionState createSessionState() {
         return new CliSessionState(conf) {
    + @Override
            public void setSparkSession(SparkSession sparkSession) {
              super.setSparkSession(sparkSession);
              if (sparkSession != null) {
    @@ -1136,11 +1141,6 @@ public class QTestUtil {
          return commands;
        }

    - private boolean isComment(final String line) {
    - String lineTrimmed = line.trim();
    - return lineTrimmed.startsWith("#") || lineTrimmed.startsWith("--");
    - }
    -
        public boolean shouldBeSkipped(String tname) {
          return qSkipSet.contains(tname);
        }
    @@ -1816,7 +1816,7 @@ public class QTestUtil {
        {
          QTestUtil[] qt = new QTestUtil[qfiles.length];
          for (int i = 0; i < qfiles.length; i++) {
    - qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20", "", "");
    + qt[i] = new QTestUtil(resDir, logDir, MiniClusterType.none, null, "0.20", defaultInitScript, defaultCleanupScript);
            qt[i].addFile(qfiles[i]);
            qt[i].clearTestSideEffects();
          }
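    Editorial note: after this change the init and cleanup scripts are only executed when the resolved file actually exists, and the script name is exposed as the new initScript Maven property, so the init script can be skipped by overriding that property with a name that does not resolve to a file. A hypothetical invocation (test name, qfile, and script name below are placeholders, not taken from this commit):

        cd itests/qtest
        mvn test -Dtest=TestCliDriver -Dqfile=some_test.q -DinitScript=no_such_init.sql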
  • Sershe at Aug 17, 2015 at 10:00 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
    index ae39507,a877338..fe60838
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnAbortedException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class TxnAbortedException extends TException implements org.apache.thrift.TBase<TxnAbortedException, TxnAbortedException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class TxnAbortedException extends TException implements org.apache.thrift.TBase<TxnAbortedException, TxnAbortedException._Fields>, java.io.Serializable, Cloneable, Comparable<TxnAbortedException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnAbortedException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
    index 18cbe53,8b255b9..266fbe1
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnInfo.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class TxnInfo implements org.apache.thrift.TBase<TxnInfo, TxnInfo._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class TxnInfo implements org.apache.thrift.TBase<TxnInfo, TxnInfo._Fields>, java.io.Serializable, Cloneable, Comparable<TxnInfo> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnInfo");

         private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
    index 4f5d02d,05af505..18db1b8
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TxnOpenException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class TxnOpenException extends TException implements org.apache.thrift.TBase<TxnOpenException, TxnOpenException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class TxnOpenException extends TException implements org.apache.thrift.TBase<TxnOpenException, TxnOpenException._Fields>, java.io.Serializable, Cloneable, Comparable<TxnOpenException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TxnOpenException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
    index 1882b57,61e7ceb..b330ce2
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Type implements org.apache.thrift.TBase<Type, Type._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Type implements org.apache.thrift.TBase<Type, Type._Fields>, java.io.Serializable, Cloneable, Comparable<Type> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Type");

         private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
    index ab91419,e05e79d..b7623ca
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownDBException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class UnknownDBException extends TException implements org.apache.thrift.TBase<UnknownDBException, UnknownDBException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class UnknownDBException extends TException implements org.apache.thrift.TBase<UnknownDBException, UnknownDBException._Fields>, java.io.Serializable, Cloneable, Comparable<UnknownDBException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownDBException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
    index 7e28591,c626bf6..bdd674b
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownPartitionException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class UnknownPartitionException extends TException implements org.apache.thrift.TBase<UnknownPartitionException, UnknownPartitionException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class UnknownPartitionException extends TException implements org.apache.thrift.TBase<UnknownPartitionException, UnknownPartitionException._Fields>, java.io.Serializable, Cloneable, Comparable<UnknownPartitionException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownPartitionException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
    index 7aa8012,2856121..768eb65
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnknownTableException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class UnknownTableException extends TException implements org.apache.thrift.TBase<UnknownTableException, UnknownTableException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class UnknownTableException extends TException implements org.apache.thrift.TBase<UnknownTableException, UnknownTableException._Fields>, java.io.Serializable, Cloneable, Comparable<UnknownTableException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnknownTableException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
    index 54b949d,cf248e0..395c15f
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/UnlockRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class UnlockRequest implements org.apache.thrift.TBase<UnlockRequest, UnlockRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class UnlockRequest implements org.apache.thrift.TBase<UnlockRequest, UnlockRequest._Fields>, java.io.Serializable, Cloneable, Comparable<UnlockRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("UnlockRequest");

         private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
    index aa83fd7,cc8d5f5..f3cbb74
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Version.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Version implements org.apache.thrift.TBase<Version, Version._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Version implements org.apache.thrift.TBase<Version, Version._Fields>, java.io.Serializable, Cloneable, Comparable<Version> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Version");

         private static final org.apache.thrift.protocol.TField VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("version", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    index 1684674,ae47cb5..9c73767
    --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
    @@@ -15,131 -16,997 +16,1000 @@@ use Thrift\Protocol\TBinaryProtocolAcce
       use Thrift\Exception\TApplicationException;


    + /**
    + * This interface is live.
    + */
       interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
    + /**
    + * @param string $key
    + * @return string
    + * @throws \metastore\MetaException
    + */
         public function getMetaConf($key);
    + /**
    + * @param string $key
    + * @param string $value
    + * @throws \metastore\MetaException
    + */
         public function setMetaConf($key, $value);
    + /**
    + * @param \metastore\Database $database
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + */
         public function create_database(\metastore\Database $database);
    + /**
    + * @param string $name
    + * @return \metastore\Database
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_database($name);
    + /**
    + * @param string $name
    + * @param bool $deleteData
    + * @param bool $cascade
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function drop_database($name, $deleteData, $cascade);
    + /**
    + * @param string $pattern
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_databases($pattern);
    + /**
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_all_databases();
    + /**
    + * @param string $dbname
    + * @param \metastore\Database $db
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function alter_database($dbname, \metastore\Database $db);
    + /**
    + * @param string $name
    + * @return \metastore\Type
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_type($name);
    + /**
    + * @param \metastore\Type $type
    + * @return bool
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + */
         public function create_type(\metastore\Type $type);
    + /**
    + * @param string $type
    + * @return bool
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function drop_type($type);
    + /**
    + * @param string $name
    + * @return array
    + * @throws \metastore\MetaException
    + */
         public function get_type_all($name);
    + /**
    + * @param string $db_name
    + * @param string $table_name
    + * @return \metastore\FieldSchema[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\UnknownTableException
    + * @throws \metastore\UnknownDBException
    + */
         public function get_fields($db_name, $table_name);
    + /**
    + * @param string $db_name
    + * @param string $table_name
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return \metastore\FieldSchema[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\UnknownTableException
    + * @throws \metastore\UnknownDBException
    + */
         public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $db_name
    + * @param string $table_name
    + * @return \metastore\FieldSchema[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\UnknownTableException
    + * @throws \metastore\UnknownDBException
    + */
         public function get_schema($db_name, $table_name);
    + /**
    + * @param string $db_name
    + * @param string $table_name
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return \metastore\FieldSchema[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\UnknownTableException
    + * @throws \metastore\UnknownDBException
    + */
         public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param \metastore\Table $tbl
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function create_table(\metastore\Table $tbl);
    + /**
    + * @param \metastore\Table $tbl
    + * @param \metastore\EnvironmentContext $environment_context
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $dbname
    + * @param string $name
    + * @param bool $deleteData
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_table($dbname, $name, $deleteData);
    + /**
    + * @param string $dbname
    + * @param string $name
    + * @param bool $deleteData
    + * @param \metastore\EnvironmentContext $environment_context
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_table_with_environment_context($dbname, $name, $deleteData, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $db_name
    + * @param string $pattern
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_tables($db_name, $pattern);
    + /**
    + * @param string $db_name
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_all_tables($db_name);
    + /**
    + * @param string $dbname
    + * @param string $tbl_name
    + * @return \metastore\Table
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_table($dbname, $tbl_name);
    - public function get_table_objects_by_name($dbname, $tbl_names);
    + /**
    + * @param string $dbname
    + * @param string[] $tbl_names
    + * @return \metastore\Table[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\UnknownDBException
    + */
    + public function get_table_objects_by_name($dbname, array $tbl_names);
    + /**
    + * @param string $dbname
    + * @param string $filter
    + * @param int $max_tables
    + * @return string[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\UnknownDBException
    + */
         public function get_table_names_by_filter($dbname, $filter, $max_tables);
    + /**
    + * @param string $dbname
    + * @param string $tbl_name
    + * @param \metastore\Table $new_tbl
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl);
    + /**
    + * @param string $dbname
    + * @param string $tbl_name
    + * @param \metastore\Table $new_tbl
    + * @param \metastore\EnvironmentContext $environment_context
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $dbname
    + * @param string $tbl_name
    + * @param \metastore\Table $new_tbl
    + * @param bool $cascade
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade);
    + /**
    + * @param \metastore\Partition $new_part
    + * @return \metastore\Partition
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
         public function add_partition(\metastore\Partition $new_part);
    + /**
    + * @param \metastore\Partition $new_part
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return \metastore\Partition
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
         public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context);
    - public function add_partitions($new_parts);
    - public function add_partitions_pspec($new_parts);
    - public function append_partition($db_name, $tbl_name, $part_vals);
    + /**
    + * @param \metastore\Partition[] $new_parts
    + * @return int
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
    + public function add_partitions(array $new_parts);
    + /**
    + * @param \metastore\PartitionSpec[] $new_parts
    + * @return int
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
    + public function add_partitions_pspec(array $new_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @return \metastore\Partition
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
    + public function append_partition($db_name, $tbl_name, array $part_vals);
    + /**
    + * @param \metastore\AddPartitionsRequest $request
    + * @return \metastore\AddPartitionsResult
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
         public function add_partitions_req(\metastore\AddPartitionsRequest $request);
    - public function append_partition_with_environment_context($db_name, $tbl_name, $part_vals, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return \metastore\Partition
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
    + public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @return \metastore\Partition
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
         public function append_partition_by_name($db_name, $tbl_name, $part_name);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return \metastore\Partition
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
         public function append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context);
    - public function drop_partition($db_name, $tbl_name, $part_vals, $deleteData);
    - public function drop_partition_with_environment_context($db_name, $tbl_name, $part_vals, $deleteData, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param bool $deleteData
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
    + public function drop_partition($db_name, $tbl_name, array $part_vals, $deleteData);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param bool $deleteData
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
    + public function drop_partition_with_environment_context($db_name, $tbl_name, array $part_vals, $deleteData, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @param bool $deleteData
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_partition_by_name($db_name, $tbl_name, $part_name, $deleteData);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @param bool $deleteData
    + * @param \metastore\EnvironmentContext $environment_context
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, $deleteData, \metastore\EnvironmentContext $environment_context);
    + /**
    + * @param \metastore\DropPartitionsRequest $req
    + * @return \metastore\DropPartitionsResult
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_partitions_req(\metastore\DropPartitionsRequest $req);
    - public function get_partition($db_name, $tbl_name, $part_vals);
    - public function exchange_partition($partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
    - public function get_partition_with_auth($db_name, $tbl_name, $part_vals, $user_name, $group_names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @return \metastore\Partition
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
    + public function get_partition($db_name, $tbl_name, array $part_vals);
    + /**
    + * @param array $partitionSpecs
    + * @param string $source_db
    + * @param string $source_table_name
    + * @param string $dest_db
    + * @param string $dest_table_name
    + * @return \metastore\Partition
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\InvalidInputException
    + */
    + public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param string $user_name
    + * @param string[] $group_names
    + * @return \metastore\Partition
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
    + public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @return \metastore\Partition
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_partition_by_name($db_name, $tbl_name, $part_name);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param int $max_parts
    + * @return \metastore\Partition[]
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_partitions($db_name, $tbl_name, $max_parts);
    - public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, $group_names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param int $max_parts
    + * @param string $user_name
    + * @param string[] $group_names
    + * @return \metastore\Partition[]
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
    + public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param int $max_parts
    + * @return \metastore\PartitionSpec[]
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_partitions_pspec($db_name, $tbl_name, $max_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param int $max_parts
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_partition_names($db_name, $tbl_name, $max_parts);
    - public function get_partitions_ps($db_name, $tbl_name, $part_vals, $max_parts);
    - public function get_partitions_ps_with_auth($db_name, $tbl_name, $part_vals, $max_parts, $user_name, $group_names);
    - public function get_partition_names_ps($db_name, $tbl_name, $part_vals, $max_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param int $max_parts
    + * @return \metastore\Partition[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
    + public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param int $max_parts
    + * @param string $user_name
    + * @param string[] $group_names
    + * @return \metastore\Partition[]
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
    + public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param int $max_parts
    + * @return string[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
    + public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $filter
    + * @param int $max_parts
    + * @return \metastore\Partition[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $filter
    + * @param int $max_parts
    + * @return \metastore\PartitionSpec[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts);
    + /**
    + * @param \metastore\PartitionsByExprRequest $req
    + * @return \metastore\PartitionsByExprResult
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_partitions_by_expr(\metastore\PartitionsByExprRequest $req);
    - public function get_partitions_by_names($db_name, $tbl_name, $names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $names
    + * @return \metastore\Partition[]
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
    + public function get_partitions_by_names($db_name, $tbl_name, array $names);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param \metastore\Partition $new_part
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_partition($db_name, $tbl_name, \metastore\Partition $new_part);
    - public function alter_partitions($db_name, $tbl_name, $new_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param \metastore\Partition[] $new_parts
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
    + public function alter_partitions($db_name, $tbl_name, array $new_parts);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param \metastore\Partition $new_part
    + * @param \metastore\EnvironmentContext $environment_context
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context);
    - public function rename_partition($db_name, $tbl_name, $part_vals, \metastore\Partition $new_part);
    - public function partition_name_has_valid_characters($part_vals, $throw_exception);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string[] $part_vals
    + * @param \metastore\Partition $new_part
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
    + public function rename_partition($db_name, $tbl_name, array $part_vals, \metastore\Partition $new_part);
    + /**
    + * @param string[] $part_vals
    + * @param bool $throw_exception
    + * @return bool
    + * @throws \metastore\MetaException
    + */
    + public function partition_name_has_valid_characters(array $part_vals, $throw_exception);
    + /**
    + * @param string $name
    + * @param string $defaultValue
    + * @return string
    + * @throws \metastore\ConfigValSecurityException
    + */
         public function get_config_value($name, $defaultValue);
    + /**
    + * @param string $part_name
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function partition_name_to_vals($part_name);
    + /**
    + * @param string $part_name
    + * @return array
    + * @throws \metastore\MetaException
    + */
         public function partition_name_to_spec($part_name);
    - public function markPartitionForEvent($db_name, $tbl_name, $part_vals, $eventType);
    - public function isPartitionMarkedForEvent($db_name, $tbl_name, $part_vals, $eventType);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param array $part_vals
    + * @param int $eventType
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\UnknownDBException
    + * @throws \metastore\UnknownTableException
    + * @throws \metastore\UnknownPartitionException
    + * @throws \metastore\InvalidPartitionException
    + */
    + public function markPartitionForEvent($db_name, $tbl_name, array $part_vals, $eventType);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param array $part_vals
    + * @param int $eventType
    + * @return bool
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\UnknownDBException
    + * @throws \metastore\UnknownTableException
    + * @throws \metastore\UnknownPartitionException
    + * @throws \metastore\InvalidPartitionException
    + */
    + public function isPartitionMarkedForEvent($db_name, $tbl_name, array $part_vals, $eventType);
    + /**
    + * @param \metastore\Index $new_index
    + * @param \metastore\Table $index_table
    + * @return \metastore\Index
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\MetaException
    + */
         public function add_index(\metastore\Index $new_index, \metastore\Table $index_table);
    + /**
    + * @param string $dbname
    + * @param string $base_tbl_name
    + * @param string $idx_name
    + * @param \metastore\Index $new_idx
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_index($dbname, $base_tbl_name, $idx_name, \metastore\Index $new_idx);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $index_name
    + * @param bool $deleteData
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_index_by_name($db_name, $tbl_name, $index_name, $deleteData);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $index_name
    + * @return \metastore\Index
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_index_by_name($db_name, $tbl_name, $index_name);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param int $max_indexes
    + * @return \metastore\Index[]
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_indexes($db_name, $tbl_name, $max_indexes);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param int $max_indexes
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_index_names($db_name, $tbl_name, $max_indexes);
    + /**
    + * @param \metastore\ColumnStatistics $stats_obj
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidInputException
    + */
         public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj);
    + /**
    + * @param \metastore\ColumnStatistics $stats_obj
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidInputException
    + */
         public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $col_name
    + * @return \metastore\ColumnStatistics
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidInputException
    + * @throws \metastore\InvalidObjectException
    + */
         public function get_table_column_statistics($db_name, $tbl_name, $col_name);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @param string $col_name
    + * @return \metastore\ColumnStatistics
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidInputException
    + * @throws \metastore\InvalidObjectException
    + */
         public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name);
    + /**
    + * @param \metastore\TableStatsRequest $request
    + * @return \metastore\TableStatsResult
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_table_statistics_req(\metastore\TableStatsRequest $request);
    + /**
    + * @param \metastore\PartitionsStatsRequest $request
    + * @return \metastore\PartitionsStatsResult
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_partitions_statistics_req(\metastore\PartitionsStatsRequest $request);
    + /**
    + * @param \metastore\PartitionsStatsRequest $request
    + * @return \metastore\AggrStats
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function get_aggr_stats_for(\metastore\PartitionsStatsRequest $request);
    + /**
    + * @param \metastore\SetPartitionsStatsRequest $request
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidInputException
    + */
         public function set_aggr_stats_for(\metastore\SetPartitionsStatsRequest $request);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $part_name
    + * @param string $col_name
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\InvalidInputException
    + */
         public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name);
    + /**
    + * @param string $db_name
    + * @param string $tbl_name
    + * @param string $col_name
    + * @return bool
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\InvalidInputException
    + */
         public function delete_table_column_statistics($db_name, $tbl_name, $col_name);
    + /**
    + * @param \metastore\Function $func
    + * @throws \metastore\AlreadyExistsException
    + * @throws \metastore\InvalidObjectException
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function create_function(\metastore\Function $func);
    + /**
    + * @param string $dbName
    + * @param string $funcName
    + * @throws \metastore\NoSuchObjectException
    + * @throws \metastore\MetaException
    + */
         public function drop_function($dbName, $funcName);
    + /**
    + * @param string $dbName
    + * @param string $funcName
    + * @param \metastore\Function $newFunc
    + * @throws \metastore\InvalidOperationException
    + * @throws \metastore\MetaException
    + */
         public function alter_function($dbName, $funcName, \metastore\Function $newFunc);
    + /**
    + * @param string $dbName
    + * @param string $pattern
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_functions($dbName, $pattern);
    + /**
    + * @param string $dbName
    + * @param string $funcName
    + * @return \metastore\Function
    + * @throws \metastore\MetaException
    + * @throws \metastore\NoSuchObjectException
    + */
         public function get_function($dbName, $funcName);
    + /**
    + * @return \metastore\GetAllFunctionsResponse
    + * @throws \metastore\MetaException
    + */
    + public function get_all_functions();
    + /**
    + * @param \metastore\Role $role
    + * @return bool
    + * @throws \metastore\MetaException
    + */
         public function create_role(\metastore\Role $role);
    + /**
    + * @param string $role_name
    + * @return bool
    + * @throws \metastore\MetaException
    + */
         public function drop_role($role_name);
    + /**
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
         public function get_role_names();
    + /**
    + * @param string $role_name
    + * @param string $principal_name
    + * @param int $principal_type
    + * @param string $grantor
    + * @param int $grantorType
    + * @param bool $grant_option
    + * @return bool
    + * @throws \metastore\MetaException
    + */
         public function grant_role($role_name, $principal_name, $principal_type, $grantor, $grantorType, $grant_option);
    + /**
    + * @param string $role_name
    + * @param string $principal_name
    + * @param int $principal_type
    + * @return bool
    + * @throws \metastore\MetaException
    + */
         public function revoke_role($role_name, $principal_name, $principal_type);
    + /**
    + * @param string $principal_name
    + * @param int $principal_type
    + * @return \metastore\Role[]
    + * @throws \metastore\MetaException
    + */
         public function list_roles($principal_name, $principal_type);
    + /**
    + * @param \metastore\GrantRevokeRoleRequest $request
    + * @return \metastore\GrantRevokeRoleResponse
    + * @throws \metastore\MetaException
    + */
         public function grant_revoke_role(\metastore\GrantRevokeRoleRequest $request);
    + /**
    + * @param \metastore\GetPrincipalsInRoleRequest $request
    + * @return \metastore\GetPrincipalsInRoleResponse
    + * @throws \metastore\MetaException
    + */
         public function get_principals_in_role(\metastore\GetPrincipalsInRoleRequest $request);
    + /**
    + * @param \metastore\GetRoleGrantsForPrincipalRequest $request
    + * @return \metastore\GetRoleGrantsForPrincipalResponse
    + * @throws \metastore\MetaException
    + */
         public function get_role_grants_for_principal(\metastore\GetRoleGrantsForPrincipalRequest $request);
    - public function get_privilege_set(\metastore\HiveObjectRef $hiveObject, $user_name, $group_names);
    + /**
    + * @param \metastore\HiveObjectRef $hiveObject
    + * @param string $user_name
    + * @param string[] $group_names
    + * @return \metastore\PrincipalPrivilegeSet
    + * @throws \metastore\MetaException
    + */
    + public function get_privilege_set(\metastore\HiveObjectRef $hiveObject, $user_name, array $group_names);
    + /**
    + * @param string $principal_name
    + * @param int $principal_type
    + * @param \metastore\HiveObjectRef $hiveObject
    + * @return \metastore\HiveObjectPrivilege[]
    + * @throws \metastore\MetaException
    + */
         public function list_privileges($principal_name, $principal_type, \metastore\HiveObjectRef $hiveObject);
    + /**
    + * @param \metastore\PrivilegeBag $privileges
    + * @return bool
    + * @throws \metastore\MetaException
    + */
         public function grant_privileges(\metastore\PrivilegeBag $privileges);
    + /**
    + * @param \metastore\PrivilegeBag $privileges
    + * @return bool
    + * @throws \metastore\MetaException
    + */
         public function revoke_privileges(\metastore\PrivilegeBag $privileges);
    + /**
    + * @param \metastore\GrantRevokePrivilegeRequest $request
    + * @return \metastore\GrantRevokePrivilegeResponse
    + * @throws \metastore\MetaException
    + */
         public function grant_revoke_privileges(\metastore\GrantRevokePrivilegeRequest $request);
    - public function set_ugi($user_name, $group_names);
    + /**
    + * @param string $user_name
    + * @param string[] $group_names
    + * @return string[]
    + * @throws \metastore\MetaException
    + */
    + public function set_ugi($user_name, array $group_names);
    + /**
    + * @param string $token_owner
    + * @param string $renewer_kerberos_principal_name
    + * @return string
    + * @throws \metastore\MetaException
    + */
         public function get_delegation_token($token_owner, $renewer_kerberos_principal_name);
    + /**
    + * @param string $token_str_form
    + * @return int
    + * @throws \metastore\MetaException
    + */
         public function renew_delegation_token($token_str_form);
    + /**
    + * @param string $token_str_form
    + * @throws \metastore\MetaException
    + */
         public function cancel_delegation_token($token_str_form);
    + /**
    + * @return \metastore\GetOpenTxnsResponse
    + */
         public function get_open_txns();
    + /**
    + * @return \metastore\GetOpenTxnsInfoResponse
    + */
         public function get_open_txns_info();
    + /**
    + * @param \metastore\OpenTxnRequest $rqst
    + * @return \metastore\OpenTxnsResponse
    + */
         public function open_txns(\metastore\OpenTxnRequest $rqst);
    + /**
    + * @param \metastore\AbortTxnRequest $rqst
    + * @throws \metastore\NoSuchTxnException
    + */
         public function abort_txn(\metastore\AbortTxnRequest $rqst);
    + /**
    + * @param \metastore\CommitTxnRequest $rqst
    + * @throws \metastore\NoSuchTxnException
    + * @throws \metastore\TxnAbortedException
    + */
         public function commit_txn(\metastore\CommitTxnRequest $rqst);
    + /**
    + * @param \metastore\LockRequest $rqst
    + * @return \metastore\LockResponse
    + * @throws \metastore\NoSuchTxnException
    + * @throws \metastore\TxnAbortedException
    + */
         public function lock(\metastore\LockRequest $rqst);
    + /**
    + * @param \metastore\CheckLockRequest $rqst
    + * @return \metastore\LockResponse
    + * @throws \metastore\NoSuchTxnException
    + * @throws \metastore\TxnAbortedException
    + * @throws \metastore\NoSuchLockException
    + */
         public function check_lock(\metastore\CheckLockRequest $rqst);
    + /**
    + * @param \metastore\UnlockRequest $rqst
    + * @throws \metastore\NoSuchLockException
    + * @throws \metastore\TxnOpenException
    + */
         public function unlock(\metastore\UnlockRequest $rqst);
    + /**
    + * @param \metastore\ShowLocksRequest $rqst
    + * @return \metastore\ShowLocksResponse
    + */
         public function show_locks(\metastore\ShowLocksRequest $rqst);
    + /**
    + * @param \metastore\HeartbeatRequest $ids
    + * @throws \metastore\NoSuchLockException
    + * @throws \metastore\NoSuchTxnException
    + * @throws \metastore\TxnAbortedException
    + */
         public function heartbeat(\metastore\HeartbeatRequest $ids);
    + /**
    + * @param \metastore\HeartbeatTxnRangeRequest $txns
    + * @return \metastore\HeartbeatTxnRangeResponse
    + */
         public function heartbeat_txn_range(\metastore\HeartbeatTxnRangeRequest $txns);
    + /**
    + * @param \metastore\CompactionRequest $rqst
    + */
         public function compact(\metastore\CompactionRequest $rqst);
    + /**
    + * @param \metastore\ShowCompactRequest $rqst
    + * @return \metastore\ShowCompactResponse
    + */
         public function show_compact(\metastore\ShowCompactRequest $rqst);
    + /**
    + * @param \metastore\AddDynamicPartitions $rqst
    + * @throws \metastore\NoSuchTxnException
    + * @throws \metastore\TxnAbortedException
    + */
         public function add_dynamic_partitions(\metastore\AddDynamicPartitions $rqst);
    + /**
    + * @param \metastore\NotificationEventRequest $rqst
    + * @return \metastore\NotificationEventResponse
    + */
         public function get_next_notification(\metastore\NotificationEventRequest $rqst);
    + /**
    + * @return \metastore\CurrentNotificationEventId
    + */
         public function get_current_notificationEventId();
    + /**
    + * @param \metastore\FireEventRequest $rqst
    + * @return \metastore\FireEventResponse
    + */
         public function fire_listener_event(\metastore\FireEventRequest $rqst);
    ++ /**
    ++ */
      + public function flushCache();
       }

       class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
    @@@ -7221,53 -8141,6 +8144,53 @@@
           throw new \Exception("fire_listener_event failed: unknown result");
         }

      + public function flushCache()
      + {
      + $this->send_flushCache();
      + $this->recv_flushCache();
      + }
      +
      + public function send_flushCache()
      + {
      + $args = new \metastore\ThriftHiveMetastore_flushCache_args();
    - $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary');
    ++ $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
      + if ($bin_accel)
      + {
      + thrift_protocol_write_binary($this->output_, 'flushCache', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
      + }
      + else
      + {
      + $this->output_->writeMessageBegin('flushCache', TMessageType::CALL, $this->seqid_);
      + $args->write($this->output_);
      + $this->output_->writeMessageEnd();
      + $this->output_->getTransport()->flush();
      + }
      + }
      +
      + public function recv_flushCache()
      + {
    - $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary');
    ++ $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
      + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_flushCache_result', $this->input_->isStrictRead());
      + else
      + {
      + $rseqid = 0;
      + $fname = null;
      + $mtype = 0;
      +
      + $this->input_->readMessageBegin($fname, $mtype, $rseqid);
      + if ($mtype == TMessageType::EXCEPTION) {
      + $x = new TApplicationException();
      + $x->read($this->input_);
      + $this->input_->readMessageEnd();
      + throw $x;
      + }
      + $result = new \metastore\ThriftHiveMetastore_flushCache_result();
      + $result->read($this->input_);
      + $this->input_->readMessageEnd();
      + }
      + return;
      + }
      +
       }

       // HELPER FUNCTIONS AND STRUCTURES
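
    A minimal usage sketch (not part of this commit) of the newly exposed flushCache() RPC, going through the generated Python client much as the gen-py ThriftHiveMetastore-remote script would; the host/port (localhost:9083) and the importability of the thrift and hive_metastore modules are assumptions:

    # Hypothetical sketch: call the new flushCache() RPC via the generated client.
    # Assumes the Thrift Python library and the gen-py hive_metastore package are
    # on the path, and that a metastore listens on localhost:9083.
    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    socket = TSocket.TSocket('localhost', 9083)
    transport = TTransport.TBufferedTransport(socket)
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = ThriftHiveMetastore.Client(protocol)

    transport.open()
    client.flushCache()   # takes no arguments and returns nothing
    transport.close()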

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    index 531a475,dc348ef..6bd2728
    mode 100644,100755..100755
    --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
    @@@ -15,139 -16,152 +16,153 @@@ from thrift.transport import TSSLSocke
       from thrift.transport import THttpClient
       from thrift.protocol import TBinaryProtocol

    - import ThriftHiveMetastore
    - from ttypes import *
    + from hive_metastore import ThriftHiveMetastore
    + from hive_metastore.ttypes import *

       if len(sys.argv) <= 1 or sys.argv[1] == '--help':
    - print ''
    - print 'Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] function [arg1 [arg2...]]'
    - print ''
    - print 'Functions:'
    - print ' string getMetaConf(string key)'
    - print ' void setMetaConf(string key, string value)'
    - print ' void create_database(Database database)'
    - print ' Database get_database(string name)'
    - print ' void drop_database(string name, bool deleteData, bool cascade)'
    - print ' get_databases(string pattern)'
    - print ' get_all_databases()'
    - print ' void alter_database(string dbname, Database db)'
    - print ' Type get_type(string name)'
    - print ' bool create_type(Type type)'
    - print ' bool drop_type(string type)'
    - print ' get_type_all(string name)'
    - print ' get_fields(string db_name, string table_name)'
    - print ' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)'
    - print ' get_schema(string db_name, string table_name)'
    - print ' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)'
    - print ' void create_table(Table tbl)'
    - print ' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)'
    - print ' void drop_table(string dbname, string name, bool deleteData)'
    - print ' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)'
    - print ' get_tables(string db_name, string pattern)'
    - print ' get_all_tables(string db_name)'
    - print ' Table get_table(string dbname, string tbl_name)'
    - print ' get_table_objects_by_name(string dbname, tbl_names)'
    - print ' get_table_names_by_filter(string dbname, string filter, i16 max_tables)'
    - print ' void alter_table(string dbname, string tbl_name, Table new_tbl)'
    - print ' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)'
    - print ' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)'
    - print ' Partition add_partition(Partition new_part)'
    - print ' Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)'
    - print ' i32 add_partitions( new_parts)'
    - print ' i32 add_partitions_pspec( new_parts)'
    - print ' Partition append_partition(string db_name, string tbl_name, part_vals)'
    - print ' AddPartitionsResult add_partitions_req(AddPartitionsRequest request)'
    - print ' Partition append_partition_with_environment_context(string db_name, string tbl_name, part_vals, EnvironmentContext environment_context)'
    - print ' Partition append_partition_by_name(string db_name, string tbl_name, string part_name)'
    - print ' Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)'
    - print ' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)'
    - print ' bool drop_partition_with_environment_context(string db_name, string tbl_name, part_vals, bool deleteData, EnvironmentContext environment_context)'
    - print ' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)'
    - print ' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)'
    - print ' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)'
    - print ' Partition get_partition(string db_name, string tbl_name, part_vals)'
    - print ' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)'
    - print ' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)'
    - print ' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)'
    - print ' get_partitions(string db_name, string tbl_name, i16 max_parts)'
    - print ' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names)'
    - print ' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)'
    - print ' get_partition_names(string db_name, string tbl_name, i16 max_parts)'
    - print ' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)'
    - print ' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)'
    - print ' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)'
    - print ' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)'
    - print ' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)'
    - print ' PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)'
    - print ' get_partitions_by_names(string db_name, string tbl_name, names)'
    - print ' void alter_partition(string db_name, string tbl_name, Partition new_part)'
    - print ' void alter_partitions(string db_name, string tbl_name, new_parts)'
    - print ' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)'
    - print ' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)'
    - print ' bool partition_name_has_valid_characters( part_vals, bool throw_exception)'
    - print ' string get_config_value(string name, string defaultValue)'
    - print ' partition_name_to_vals(string part_name)'
    - print ' partition_name_to_spec(string part_name)'
    - print ' void markPartitionForEvent(string db_name, string tbl_name, part_vals, PartitionEventType eventType)'
    - print ' bool isPartitionMarkedForEvent(string db_name, string tbl_name, part_vals, PartitionEventType eventType)'
    - print ' Index add_index(Index new_index, Table index_table)'
    - print ' void alter_index(string dbname, string base_tbl_name, string idx_name, Index new_idx)'
    - print ' bool drop_index_by_name(string db_name, string tbl_name, string index_name, bool deleteData)'
    - print ' Index get_index_by_name(string db_name, string tbl_name, string index_name)'
    - print ' get_indexes(string db_name, string tbl_name, i16 max_indexes)'
    - print ' get_index_names(string db_name, string tbl_name, i16 max_indexes)'
    - print ' bool update_table_column_statistics(ColumnStatistics stats_obj)'
    - print ' bool update_partition_column_statistics(ColumnStatistics stats_obj)'
    - print ' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)'
    - print ' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)'
    - print ' TableStatsResult get_table_statistics_req(TableStatsRequest request)'
    - print ' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)'
    - print ' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)'
    - print ' bool set_aggr_stats_for(SetPartitionsStatsRequest request)'
    - print ' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)'
    - print ' bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)'
    - print ' void create_function(Function func)'
    - print ' void drop_function(string dbName, string funcName)'
    - print ' void alter_function(string dbName, string funcName, Function newFunc)'
    - print ' get_functions(string dbName, string pattern)'
    - print ' Function get_function(string dbName, string funcName)'
    - print ' bool create_role(Role role)'
    - print ' bool drop_role(string role_name)'
    - print ' get_role_names()'
    - print ' bool grant_role(string role_name, string principal_name, PrincipalType principal_type, string grantor, PrincipalType grantorType, bool grant_option)'
    - print ' bool revoke_role(string role_name, string principal_name, PrincipalType principal_type)'
    - print ' list_roles(string principal_name, PrincipalType principal_type)'
    - print ' GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request)'
    - print ' GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)'
    - print ' GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request)'
    - print ' PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, string user_name, group_names)'
    - print ' list_privileges(string principal_name, PrincipalType principal_type, HiveObjectRef hiveObject)'
    - print ' bool grant_privileges(PrivilegeBag privileges)'
    - print ' bool revoke_privileges(PrivilegeBag privileges)'
    - print ' GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request)'
    - print ' set_ugi(string user_name, group_names)'
    - print ' string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)'
    - print ' i64 renew_delegation_token(string token_str_form)'
    - print ' void cancel_delegation_token(string token_str_form)'
    - print ' GetOpenTxnsResponse get_open_txns()'
    - print ' GetOpenTxnsInfoResponse get_open_txns_info()'
    - print ' OpenTxnsResponse open_txns(OpenTxnRequest rqst)'
    - print ' void abort_txn(AbortTxnRequest rqst)'
    - print ' void commit_txn(CommitTxnRequest rqst)'
    - print ' LockResponse lock(LockRequest rqst)'
    - print ' LockResponse check_lock(CheckLockRequest rqst)'
    - print ' void unlock(UnlockRequest rqst)'
    - print ' ShowLocksResponse show_locks(ShowLocksRequest rqst)'
    - print ' void heartbeat(HeartbeatRequest ids)'
    - print ' HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns)'
    - print ' void compact(CompactionRequest rqst)'
    - print ' ShowCompactResponse show_compact(ShowCompactRequest rqst)'
    - print ' void add_dynamic_partitions(AddDynamicPartitions rqst)'
    - print ' NotificationEventResponse get_next_notification(NotificationEventRequest rqst)'
    - print ' CurrentNotificationEventId get_current_notificationEventId()'
    - print ' FireEventResponse fire_listener_event(FireEventRequest rqst)'
    - print ' void flushCache()'
    - print ''
    + print('')
    + print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]')
    + print('')
    + print('Functions:')
    + print(' string getMetaConf(string key)')
    + print(' void setMetaConf(string key, string value)')
    + print(' void create_database(Database database)')
    + print(' Database get_database(string name)')
    + print(' void drop_database(string name, bool deleteData, bool cascade)')
    + print(' get_databases(string pattern)')
    + print(' get_all_databases()')
    + print(' void alter_database(string dbname, Database db)')
    + print(' Type get_type(string name)')
    + print(' bool create_type(Type type)')
    + print(' bool drop_type(string type)')
    + print(' get_type_all(string name)')
    + print(' get_fields(string db_name, string table_name)')
    + print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
    + print(' get_schema(string db_name, string table_name)')
    + print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
    + print(' void create_table(Table tbl)')
    + print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)')
    + print(' void drop_table(string dbname, string name, bool deleteData)')
    + print(' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
    + print(' get_tables(string db_name, string pattern)')
    + print(' get_all_tables(string db_name)')
    + print(' Table get_table(string dbname, string tbl_name)')
    + print(' get_table_objects_by_name(string dbname, tbl_names)')
    + print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)')
    + print(' void alter_table(string dbname, string tbl_name, Table new_tbl)')
    + print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)')
    + print(' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)')
    + print(' Partition add_partition(Partition new_part)')
    + print(' Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)')
    + print(' i32 add_partitions( new_parts)')
    + print(' i32 add_partitions_pspec( new_parts)')
    + print(' Partition append_partition(string db_name, string tbl_name, part_vals)')
    + print(' AddPartitionsResult add_partitions_req(AddPartitionsRequest request)')
    + print(' Partition append_partition_with_environment_context(string db_name, string tbl_name, part_vals, EnvironmentContext environment_context)')
    + print(' Partition append_partition_by_name(string db_name, string tbl_name, string part_name)')
    + print(' Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)')
    + print(' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)')
    + print(' bool drop_partition_with_environment_context(string db_name, string tbl_name, part_vals, bool deleteData, EnvironmentContext environment_context)')
    + print(' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)')
    + print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)')
    + print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)')
    + print(' Partition get_partition(string db_name, string tbl_name, part_vals)')
    + print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
    + print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)')
    + print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)')
    + print(' get_partitions(string db_name, string tbl_name, i16 max_parts)')
    + print(' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names)')
    + print(' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)')
    + print(' get_partition_names(string db_name, string tbl_name, i16 max_parts)')
    + print(' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)')
    + print(' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)')
    + print(' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)')
    + print(' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)')
    + print(' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)')
    + print(' PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)')
    + print(' get_partitions_by_names(string db_name, string tbl_name, names)')
    + print(' void alter_partition(string db_name, string tbl_name, Partition new_part)')
    + print(' void alter_partitions(string db_name, string tbl_name, new_parts)')
    + print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)')
    + print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)')
    + print(' bool partition_name_has_valid_characters( part_vals, bool throw_exception)')
    + print(' string get_config_value(string name, string defaultValue)')
    + print(' partition_name_to_vals(string part_name)')
    + print(' partition_name_to_spec(string part_name)')
    + print(' void markPartitionForEvent(string db_name, string tbl_name, part_vals, PartitionEventType eventType)')
    + print(' bool isPartitionMarkedForEvent(string db_name, string tbl_name, part_vals, PartitionEventType eventType)')
    + print(' Index add_index(Index new_index, Table index_table)')
    + print(' void alter_index(string dbname, string base_tbl_name, string idx_name, Index new_idx)')
    + print(' bool drop_index_by_name(string db_name, string tbl_name, string index_name, bool deleteData)')
    + print(' Index get_index_by_name(string db_name, string tbl_name, string index_name)')
    + print(' get_indexes(string db_name, string tbl_name, i16 max_indexes)')
    + print(' get_index_names(string db_name, string tbl_name, i16 max_indexes)')
    + print(' bool update_table_column_statistics(ColumnStatistics stats_obj)')
    + print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)')
    + print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)')
    + print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)')
    + print(' TableStatsResult get_table_statistics_req(TableStatsRequest request)')
    + print(' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)')
    + print(' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)')
    + print(' bool set_aggr_stats_for(SetPartitionsStatsRequest request)')
    + print(' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)')
    + print(' bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)')
    + print(' void create_function(Function func)')
    + print(' void drop_function(string dbName, string funcName)')
    + print(' void alter_function(string dbName, string funcName, Function newFunc)')
    + print(' get_functions(string dbName, string pattern)')
    + print(' Function get_function(string dbName, string funcName)')
    + print(' GetAllFunctionsResponse get_all_functions()')
    + print(' bool create_role(Role role)')
    + print(' bool drop_role(string role_name)')
    + print(' get_role_names()')
    + print(' bool grant_role(string role_name, string principal_name, PrincipalType principal_type, string grantor, PrincipalType grantorType, bool grant_option)')
    + print(' bool revoke_role(string role_name, string principal_name, PrincipalType principal_type)')
    + print(' list_roles(string principal_name, PrincipalType principal_type)')
    + print(' GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request)')
    + print(' GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)')
    + print(' GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request)')
    + print(' PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, string user_name, group_names)')
    + print(' list_privileges(string principal_name, PrincipalType principal_type, HiveObjectRef hiveObject)')
    + print(' bool grant_privileges(PrivilegeBag privileges)')
    + print(' bool revoke_privileges(PrivilegeBag privileges)')
    + print(' GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request)')
    + print(' set_ugi(string user_name, group_names)')
    + print(' string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)')
    + print(' i64 renew_delegation_token(string token_str_form)')
    + print(' void cancel_delegation_token(string token_str_form)')
    + print(' GetOpenTxnsResponse get_open_txns()')
    + print(' GetOpenTxnsInfoResponse get_open_txns_info()')
    + print(' OpenTxnsResponse open_txns(OpenTxnRequest rqst)')
    + print(' void abort_txn(AbortTxnRequest rqst)')
    + print(' void commit_txn(CommitTxnRequest rqst)')
    + print(' LockResponse lock(LockRequest rqst)')
    + print(' LockResponse check_lock(CheckLockRequest rqst)')
    + print(' void unlock(UnlockRequest rqst)')
    + print(' ShowLocksResponse show_locks(ShowLocksRequest rqst)')
    + print(' void heartbeat(HeartbeatRequest ids)')
    + print(' HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns)')
    + print(' void compact(CompactionRequest rqst)')
    + print(' ShowCompactResponse show_compact(ShowCompactRequest rqst)')
    + print(' void add_dynamic_partitions(AddDynamicPartitions rqst)')
    + print(' NotificationEventResponse get_next_notification(NotificationEventRequest rqst)')
    + print(' CurrentNotificationEventId get_current_notificationEventId()')
    + print(' FireEventResponse fire_listener_event(FireEventRequest rqst)')
    ++ print(' void flushCache()')
    + print(' string getName()')
    + print(' string getVersion()')
    + print(' fb_status getStatus()')
    + print(' string getStatusDetails()')
    + print(' getCounters()')
    + print(' i64 getCounter(string key)')
    + print(' void setOption(string key, string value)')
    + print(' string getOption(string key)')
    + print(' getOptions()')
    ++ print(' string getCpuProfile(i32 profileDurationInSec)')
    + print(' i64 aliveSince()')
      - print(' reflection_limited.Service getLimitedReflection()')
    + print(' void reinitialize()')
    + print(' void shutdown()')
    + print('')
         sys.exit(0)

       pp = pprint.PrettyPrinter(indent = 2)
    @@@ -936,14 -961,86 +962,92 @@@ elif cmd == 'fire_listener_event'
           sys.exit(1)
         pp.pprint(client.fire_listener_event(eval(args[0]),))

      +elif cmd == 'flushCache':
      + if len(args) != 0:
    - print 'flushCache requires 0 args'
    ++ print('flushCache requires 0 args')
      + sys.exit(1)
      + pp.pprint(client.flushCache())
      +
    + elif cmd == 'getName':
    + if len(args) != 0:
    + print('getName requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.getName())
    +
    + elif cmd == 'getVersion':
    + if len(args) != 0:
    + print('getVersion requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.getVersion())
    +
    + elif cmd == 'getStatus':
    + if len(args) != 0:
    + print('getStatus requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.getStatus())
    +
    + elif cmd == 'getStatusDetails':
    + if len(args) != 0:
    + print('getStatusDetails requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.getStatusDetails())
    +
    + elif cmd == 'getCounters':
    + if len(args) != 0:
    + print('getCounters requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.getCounters())
    +
    + elif cmd == 'getCounter':
    + if len(args) != 1:
    + print('getCounter requires 1 args')
    + sys.exit(1)
    + pp.pprint(client.getCounter(args[0],))
    +
    + elif cmd == 'setOption':
    + if len(args) != 2:
    + print('setOption requires 2 args')
    + sys.exit(1)
    + pp.pprint(client.setOption(args[0],args[1],))
    +
    + elif cmd == 'getOption':
    + if len(args) != 1:
    + print('getOption requires 1 args')
    + sys.exit(1)
    + pp.pprint(client.getOption(args[0],))
    +
    + elif cmd == 'getOptions':
    + if len(args) != 0:
    + print('getOptions requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.getOptions())
    +
    ++elif cmd == 'getCpuProfile':
    ++ if len(args) != 1:
    ++ print('getCpuProfile requires 1 args')
    ++ sys.exit(1)
    ++ pp.pprint(client.getCpuProfile(eval(args[0]),))
    ++
    + elif cmd == 'aliveSince':
    + if len(args) != 0:
    + print('aliveSince requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.aliveSince())
    +
      -elif cmd == 'getLimitedReflection':
      - if len(args) != 0:
      - print('getLimitedReflection requires 0 args')
      - sys.exit(1)
      - pp.pprint(client.getLimitedReflection())
      -
    + elif cmd == 'reinitialize':
    + if len(args) != 0:
    + print('reinitialize requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.reinitialize())
    +
    + elif cmd == 'shutdown':
    + if len(args) != 0:
    + print('shutdown requires 0 args')
    + sys.exit(1)
    + pp.pprint(client.shutdown())
    +
       else:
    - print 'Unrecognized method %s' % cmd
    + print('Unrecognized method %s' % cmd)
         sys.exit(1)

       transport.close()

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    index 4aad3aa,9e460f0..dd75b01
    --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
    @@@ -1018,9 -1021,6 +1021,9 @@@ class Iface(fb303.FacebookService.Iface
           """
           pass

    - def flushCache(self, ):
    ++ def flushCache(self):
      + pass
      +

       class Client(fb303.FacebookService.Client, Iface):
         """
    @@@ -5438,29 -5589,6 +5592,30 @@@
             return result.success
           raise TApplicationException(TApplicationException.MISSING_RESULT, "fire_listener_event failed: unknown result");

    - def flushCache(self, ):
    ++ def flushCache(self):
      + self.send_flushCache()
      + self.recv_flushCache()
      +
    - def send_flushCache(self, ):
    ++ def send_flushCache(self):
      + self._oprot.writeMessageBegin('flushCache', TMessageType.CALL, self._seqid)
      + args = flushCache_args()
      + args.write(self._oprot)
      + self._oprot.writeMessageEnd()
      + self._oprot.trans.flush()
      +
    - def recv_flushCache(self, ):
    - (fname, mtype, rseqid) = self._iprot.readMessageBegin()
    ++ def recv_flushCache(self):
    ++ iprot = self._iprot
    ++ (fname, mtype, rseqid) = iprot.readMessageBegin()
      + if mtype == TMessageType.EXCEPTION:
      + x = TApplicationException()
    - x.read(self._iprot)
    - self._iprot.readMessageEnd()
    ++ x.read(iprot)
    ++ iprot.readMessageEnd()
      + raise x
      + result = flushCache_result()
    - result.read(self._iprot)
    - self._iprot.readMessageEnd()
    ++ result.read(iprot)
    ++ iprot.readMessageEnd()
      + return
      +

       class Processor(fb303.FacebookService.Processor, Iface, TProcessor):
         def __init__(self, handler):
    @@@ -27411,87 -29264,3 +29303,95 @@@ class fire_listener_event_result

         def __ne__(self, other):
           return not (self == other)
      +
      +class flushCache_args:
      +
      + thrift_spec = (
      + )
      +
      + def read(self, iprot):
      + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      + return
      + iprot.readStructBegin()
      + while True:
      + (fname, ftype, fid) = iprot.readFieldBegin()
      + if ftype == TType.STOP:
      + break
      + else:
      + iprot.skip(ftype)
      + iprot.readFieldEnd()
      + iprot.readStructEnd()
      +
      + def write(self, oprot):
      + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      + return
      + oprot.writeStructBegin('flushCache_args')
      + oprot.writeFieldStop()
      + oprot.writeStructEnd()
      +
      + def validate(self):
      + return
      +
      +
    ++ def __hash__(self):
    ++ value = 17
    ++ return value
    ++
      + def __repr__(self):
      + L = ['%s=%r' % (key, value)
      + for key, value in self.__dict__.iteritems()]
      + return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
      +
      + def __eq__(self, other):
      + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
      +
      + def __ne__(self, other):
      + return not (self == other)
      +
      +class flushCache_result:
      +
      + thrift_spec = (
      + )
      +
      + def read(self, iprot):
      + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      + return
      + iprot.readStructBegin()
      + while True:
      + (fname, ftype, fid) = iprot.readFieldBegin()
      + if ftype == TType.STOP:
      + break
      + else:
      + iprot.skip(ftype)
      + iprot.readFieldEnd()
      + iprot.readStructEnd()
      +
      + def write(self, oprot):
      + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      + return
      + oprot.writeStructBegin('flushCache_result')
      + oprot.writeFieldStop()
      + oprot.writeStructEnd()
      +
      + def validate(self):
      + return
      +
      +
    ++ def __hash__(self):
    ++ value = 17
    ++ return value
    ++
      + def __repr__(self):
      + L = ['%s=%r' % (key, value)
      + for key, value in self.__dict__.iteritems()]
      + return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
      +
      + def __eq__(self, other):
      + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
      +
      + def __ne__(self, other):
      + return not (self == other)
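
    For reference, the same call from Java goes through the regenerated ThriftHiveMetastore.Client.
    A minimal sketch, assuming a metastore listening on localhost:9083 (the host, port and the
    class name FlushCacheClientSketch are illustrative, not part of this patch):

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class FlushCacheClientSketch {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083);  // assumed metastore endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // New no-arg call added by this patch; under the hood it performs the same
        // send_flushCache/recv_flushCache round trip as the generated Python client above.
        client.flushCache();
        transport.close();
      }
    }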

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java
    ----------------------------------------------------------------------

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java
    ----------------------------------------------------------------------
  • Sershe at Aug 17, 2015 at 10:00 pm
    HIVE-11571: Fix Hive PTest2 logging configuration (Gopal V reviewed by Sergey Shelukhin)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/3071ce96
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/3071ce96
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/3071ce96

    Branch: refs/heads/hbase-metastore
    Commit: 3071ce96b6b8635f668d0698c18a727bea1b1de1
    Parents: fe1efe5
    Author: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Authored: Fri Aug 14 15:40:48 2015 -0700
    Committer: Prasanth Jayachandran <j.prasanth.j@gmail.com>
    Committed: Fri Aug 14 15:40:48 2015 -0700

    ----------------------------------------------------------------------
      testutils/ptest2/src/main/resources/log4j2.xml | 1 +
      1 file changed, 1 insertion(+)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/3071ce96/testutils/ptest2/src/main/resources/log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/testutils/ptest2/src/main/resources/log4j2.xml b/testutils/ptest2/src/main/resources/log4j2.xml
    index 6502ad1..42141b7 100644
    --- a/testutils/ptest2/src/main/resources/log4j2.xml
    +++ b/testutils/ptest2/src/main/resources/log4j2.xml
    @@ -75,5 +75,6 @@
          <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
            <AppenderRef ref="${sys:hive.ptest.root.logger}"/>
          </Logger>
    + </Loggers>

      </Configuration>
  • Sershe at Aug 17, 2015 at 10:00 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/itests/pom.xml
    ----------------------------------------------------------------------
    diff --git a/itests/pom.xml b/itests/pom.xml
    index f156cc4..acce713 100644
    --- a/itests/pom.xml
    +++ b/itests/pom.xml
    @@ -94,7 +94,7 @@
                            }
                            mkdir -p $DOWNLOAD_DIR
                            download "http://d3jw87u4immizc.cloudfront.net/spark-tarball/spark-${spark.version}-bin-hadoop2-without-hive.tgz" "spark"
    - cp -f $HIVE_ROOT/data/conf/spark/log4j.properties $BASE_DIR/spark/conf/
    + cp -f $HIVE_ROOT/data/conf/spark/log4j2.xml $BASE_DIR/spark/conf/
                            sed '/package /d' ${basedir}/${hive.path.to.root}/contrib/src/java/org/apache/hadoop/hive/contrib/udf/example/UDFExampleAdd.java > /tmp/UDFExampleAdd.java
                            javac -cp ${settings.localRepository}/org/apache/hive/hive-exec/${project.version}/hive-exec-${project.version}.jar /tmp/UDFExampleAdd.java -d /tmp
                            jar -cf /tmp/udfexampleadd-1.0.jar -C /tmp UDFExampleAdd.class

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    index b33cb58..65117c4 100644
    --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
    @@ -5931,7 +5931,7 @@ public class HiveMetaStore extends ThriftHiveMetastore {

          // If the log4j.configuration property hasn't already been explicitly set,
          // use Hive's default log4j configuration
    - if (System.getProperty("log4j.configuration") == null) {
    + if (System.getProperty("log4j.configurationFile") == null) {
            // NOTE: It is critical to do this here so that log4j is reinitialized
            // before any of the other core hive classes are loaded
            try {
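
    The body of that try block is elided by the hunk above. A hedged, self-contained version of
    the pattern, assuming Hive's existing LogUtils.initHiveLog4j() helper (the wrapper class and
    method names here are illustrative):

    import org.apache.hadoop.hive.common.LogUtils;
    import org.apache.hadoop.hive.common.LogUtils.LogInitializationException;

    public final class MetastoreLoggingBootstrapSketch {
      public static void configureLogging() {
        // Log4j2 reads -Dlog4j.configurationFile rather than -Dlog4j.configuration,
        // which is why the property name changes in the hunk above.
        if (System.getProperty("log4j.configurationFile") == null) {
          try {
            LogUtils.initHiveLog4j();  // fall back to Hive's bundled Log4j2 config
          } catch (LogInitializationException e) {
            // keep whatever logging configuration is already in effect
          }
        }
      }
    }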

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
    index ad99427..df42f1a 100644
    --- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
    +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java
    @@ -17,15 +17,11 @@
       */
      package org.apache.hadoop.hive.metastore.txn;

    -import org.apache.commons.logging.Log;
    -import org.apache.commons.logging.LogFactory;
    -import org.apache.hadoop.hive.conf.HiveConf;
    -import org.apache.hadoop.hive.metastore.api.*;
    -import org.apache.log4j.Level;
    -import org.apache.log4j.LogManager;
    -import org.junit.After;
    -import org.junit.Before;
    -import org.junit.Test;
    +import static junit.framework.Assert.assertEquals;
    +import static junit.framework.Assert.assertNotNull;
    +import static junit.framework.Assert.assertNull;
    +import static junit.framework.Assert.assertTrue;
    +import static junit.framework.Assert.fail;

      import java.util.ArrayList;
      import java.util.Arrays;
    @@ -34,7 +30,29 @@ import java.util.Set;
      import java.util.SortedSet;
      import java.util.TreeSet;

    -import static junit.framework.Assert.*;
    +import org.apache.hadoop.hive.conf.HiveConf;
    +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
    +import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
    +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
    +import org.apache.hadoop.hive.metastore.api.CompactionRequest;
    +import org.apache.hadoop.hive.metastore.api.CompactionType;
    +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
    +import org.apache.hadoop.hive.metastore.api.LockComponent;
    +import org.apache.hadoop.hive.metastore.api.LockLevel;
    +import org.apache.hadoop.hive.metastore.api.LockRequest;
    +import org.apache.hadoop.hive.metastore.api.LockResponse;
    +import org.apache.hadoop.hive.metastore.api.LockState;
    +import org.apache.hadoop.hive.metastore.api.LockType;
    +import org.apache.hadoop.hive.metastore.api.MetaException;
    +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
    +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
    +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
    +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
    +import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
    +import org.apache.hadoop.hive.metastore.api.UnlockRequest;
    +import org.junit.After;
    +import org.junit.Before;
    +import org.junit.Test;

      /**
       * Tests for TxnHandler.
    @@ -43,11 +61,9 @@ public class TestCompactionTxnHandler {

        private HiveConf conf = new HiveConf();
        private CompactionTxnHandler txnHandler;
    - static final private Log LOG = LogFactory.getLog(TestCompactionTxnHandler.class);

        public TestCompactionTxnHandler() throws Exception {
          TxnDbUtil.setConfValues(conf);
    - LogManager.getLogger(TxnHandler.class.getName()).setLevel(Level.DEBUG);
          tearDown();
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
    ----------------------------------------------------------------------
    diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
    index f478184..6dc0bd3 100644
    --- a/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
    +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java
    @@ -17,16 +17,11 @@
       */
      package org.apache.hadoop.hive.metastore.txn;

    -import org.apache.commons.logging.Log;
    -import org.apache.commons.logging.LogFactory;
    -import org.apache.hadoop.hive.conf.HiveConf;
    -import org.apache.hadoop.hive.metastore.api.*;
    -import org.apache.log4j.Level;
    -import org.apache.log4j.LogManager;
    -import org.junit.After;
    -import org.junit.Before;
    -import org.junit.Ignore;
    -import org.junit.Test;
    +import static junit.framework.Assert.assertEquals;
    +import static junit.framework.Assert.assertFalse;
    +import static junit.framework.Assert.assertNull;
    +import static junit.framework.Assert.assertTrue;
    +import static junit.framework.Assert.fail;

      import java.sql.Connection;
      import java.sql.SQLException;
    @@ -36,21 +31,66 @@ import java.util.List;
      import java.util.concurrent.TimeUnit;
      import java.util.concurrent.atomic.AtomicBoolean;

    -import static junit.framework.Assert.*;
    +import org.apache.hadoop.hive.conf.HiveConf;
    +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
    +import org.apache.hadoop.hive.metastore.api.CheckLockRequest;
    +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
    +import org.apache.hadoop.hive.metastore.api.CompactionRequest;
    +import org.apache.hadoop.hive.metastore.api.CompactionType;
    +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
    +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
    +import org.apache.hadoop.hive.metastore.api.HeartbeatRequest;
    +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
    +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
    +import org.apache.hadoop.hive.metastore.api.LockComponent;
    +import org.apache.hadoop.hive.metastore.api.LockLevel;
    +import org.apache.hadoop.hive.metastore.api.LockRequest;
    +import org.apache.hadoop.hive.metastore.api.LockResponse;
    +import org.apache.hadoop.hive.metastore.api.LockState;
    +import org.apache.hadoop.hive.metastore.api.LockType;
    +import org.apache.hadoop.hive.metastore.api.MetaException;
    +import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
    +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
    +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
    +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
    +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
    +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
    +import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
    +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
    +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
    +import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
    +import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
    +import org.apache.hadoop.hive.metastore.api.TxnInfo;
    +import org.apache.hadoop.hive.metastore.api.TxnOpenException;
    +import org.apache.hadoop.hive.metastore.api.TxnState;
    +import org.apache.hadoop.hive.metastore.api.UnlockRequest;
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.junit.After;
    +import org.junit.Before;
    +import org.junit.Ignore;
    +import org.junit.Test;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;

      /**
       * Tests for TxnHandler.
       */
      public class TestTxnHandler {
        static final private String CLASS_NAME = TxnHandler.class.getName();
    - static final private Log LOG = LogFactory.getLog(CLASS_NAME);
    + private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME);

        private HiveConf conf = new HiveConf();
        private TxnHandler txnHandler;

        public TestTxnHandler() throws Exception {
          TxnDbUtil.setConfValues(conf);
    - LogManager.getLogger(TxnHandler.class.getName()).setLevel(Level.DEBUG);
    + LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    + Configuration conf = ctx.getConfiguration();
    + conf.getLoggerConfig(CLASS_NAME).setLevel(Level.DEBUG);
    + ctx.updateLoggers(conf);
          tearDown();
        }
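
    The constructor above replaces the Log4j 1.x call LogManager.getLogger(...).setLevel(Level.DEBUG)
    with the Log4j2 core API. The same pattern as a standalone helper (class and method names are
    illustrative):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.LoggerContext;
    import org.apache.logging.log4j.core.config.Configuration;

    public final class DebugLevelSketch {
      public static void enableDebug(String loggerName) {
        LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        Configuration config = ctx.getConfiguration();
        config.getLoggerConfig(loggerName).setLevel(Level.DEBUG);
        ctx.updateLoggers(config);  // push the changed configuration to the live loggers
      }
    }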


    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/packaging/src/main/assembly/bin.xml
    ----------------------------------------------------------------------
    diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
    index 63253c5..0fa6af8 100644
    --- a/packaging/src/main/assembly/bin.xml
    +++ b/packaging/src/main/assembly/bin.xml
    @@ -275,7 +275,7 @@
            <fileMode>644</fileMode>
            <includes>
              <include>webhcat-default.xml</include>
    - <include>webhcat-log4j.properties</include>
    + <include>webhcat-log4j2.xml</include>
            </includes>
            <filtered>true</filtered>
            <outputDirectory>hcatalog/etc/webhcat</outputDirectory>
    @@ -323,19 +323,19 @@

        <files>
          <file>
    - <source>${project.parent.basedir}/common/src/main/resources/hive-log4j.properties</source>
    + <source>${project.parent.basedir}/common/src/main/resources/hive-log4j2.xml</source>
            <outputDirectory>conf</outputDirectory>
    - <destName>hive-log4j.properties.template</destName>
    + <destName>hive-log4j2.xml.template</destName>
          </file>
          <file>
    - <source>${project.parent.basedir}/ql/src/main/resources/hive-exec-log4j.properties</source>
    + <source>${project.parent.basedir}/ql/src/main/resources/hive-exec-log4j2.xml</source>
            <outputDirectory>conf</outputDirectory>
    - <destName>hive-exec-log4j.properties.template</destName>
    + <destName>hive-exec-log4j2.xml.template</destName>
          </file>
          <file>
    - <source>${project.parent.basedir}/beeline/src/main/resources/beeline-log4j.properties</source>
    + <source>${project.parent.basedir}/beeline/src/main/resources/beeline-log4j2.xml</source>
            <outputDirectory>conf</outputDirectory>
    - <destName>beeline-log4j.properties.template</destName>
    + <destName>beeline-log4j2.xml.template</destName>
          </file>
          <file>
            <source>${project.parent.basedir}/hcatalog/README.txt</source>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/pom.xml
    ----------------------------------------------------------------------
    diff --git a/pom.xml b/pom.xml
    index 0383e01..15c2805 100644
    --- a/pom.xml
    +++ b/pom.xml
    @@ -148,8 +148,7 @@
          <kryo.version>2.22</kryo.version>
          <libfb303.version>0.9.2</libfb303.version>
          <libthrift.version>0.9.2</libthrift.version>
    - <log4j.version>1.2.16</log4j.version>
    - <log4j-extras.version>1.2.17</log4j-extras.version>
    + <log4j2.version>2.3</log4j2.version>
          <opencsv.version>2.3</opencsv.version>
          <mockito-all.version>1.9.5</mockito-all.version>
          <mina.version>2.0.0-M5</mina.version>
    @@ -366,14 +365,24 @@
              <version>${junit.version}</version>
            </dependency>
            <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>log4j</artifactId>
    - <version>${log4j.version}</version>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-1.2-api</artifactId>
    + <version>${log4j2.version}</version>
            </dependency>
            <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>apache-log4j-extras</artifactId>
    - <version>${log4j-extras.version}</version>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-web</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-slf4j-impl</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-jcl</artifactId>
    + <version>${log4j2.version}</version>
            </dependency>
            <dependency>
              <groupId>org.antlr</groupId>
    @@ -584,11 +593,6 @@
              <version>${slf4j.version}</version>
            </dependency>
            <dependency>
    - <groupId>org.slf4j</groupId>
    - <artifactId>slf4j-log4j12</artifactId>
    - <version>${slf4j.version}</version>
    - </dependency>
    - <dependency>
              <groupId>xerces</groupId>
              <artifactId>xercesImpl</artifactId>
              <version>${xerces.version}</version>
    @@ -604,11 +608,6 @@
            <artifactId>slf4j-api</artifactId>
            <version>${slf4j.version}</version>
          </dependency>
    - <dependency>
    - <groupId>org.slf4j</groupId>
    - <artifactId>slf4j-log4j12</artifactId>
    - <version>${slf4j.version}</version>
    - </dependency>
        </dependencies>

        <build>
    @@ -872,7 +871,7 @@
                  <!-- required for hive-exec jar path and tests which reference a jar -->
                  <maven.local.repository>${maven.repo.local}</maven.local.repository>
                  <mapred.job.tracker>local</mapred.job.tracker>
    - <log4j.configuration>${test.log4j.scheme}${test.tmp.dir}/conf/hive-log4j.properties</log4j.configuration>
    + <log4j.configurationFile>${test.log4j.scheme}${test.tmp.dir}/conf/hive-log4j2.xml</log4j.configurationFile>
                  <log4j.debug>true</log4j.debug>
              <!-- don't dirty up /tmp -->
                  <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/pom.xml
    ----------------------------------------------------------------------
    diff --git a/ql/pom.xml b/ql/pom.xml
    index e7a8e7b..36b3433 100644
    --- a/ql/pom.xml
    +++ b/ql/pom.xml
    @@ -112,14 +112,19 @@
            <version>${javolution.version}</version>
          </dependency>
          <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>log4j</artifactId>
    - <version>${log4j.version}</version>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-1.2-api</artifactId>
    + <version>${log4j2.version}</version>
          </dependency>
          <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>apache-log4j-extras</artifactId>
    - <version>${log4j-extras.version}</version>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-slf4j-impl</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-jcl</artifactId>
    + <version>${log4j2.version}</version>
          </dependency>
          <dependency>
            <groupId>org.antlr</groupId>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
    index a2cf712..82345ee 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
    @@ -27,7 +27,6 @@ import java.lang.management.MemoryMXBean;
      import java.util.ArrayList;
      import java.util.Collection;
      import java.util.Collections;
    -import java.util.Enumeration;
      import java.util.List;
      import java.util.Properties;

    @@ -57,13 +56,14 @@ import org.apache.hadoop.hive.ql.exec.PartitionKeySampler;
      import org.apache.hadoop.hive.ql.exec.TableScanOperator;
      import org.apache.hadoop.hive.ql.exec.Task;
      import org.apache.hadoop.hive.ql.exec.Utilities;
    -import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
      import org.apache.hadoop.hive.ql.exec.tez.TezSessionPoolManager;
    +import org.apache.hadoop.hive.ql.exec.tez.TezSessionState;
      import org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat;
      import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
      import org.apache.hadoop.hive.ql.io.HiveKey;
      import org.apache.hadoop.hive.ql.io.HiveOutputFormatImpl;
      import org.apache.hadoop.hive.ql.io.IOPrepareCache;
    +import org.apache.hadoop.hive.ql.log.NullAppender;
      import org.apache.hadoop.hive.ql.metadata.HiveException;
      import org.apache.hadoop.hive.ql.plan.FetchWork;
      import org.apache.hadoop.hive.ql.plan.MapWork;
    @@ -88,11 +88,12 @@ import org.apache.hadoop.mapred.JobConf;
      import org.apache.hadoop.mapred.Partitioner;
      import org.apache.hadoop.mapred.RunningJob;
      import org.apache.hadoop.security.UserGroupInformation;
    -import org.apache.log4j.Appender;
    -import org.apache.log4j.BasicConfigurator;
    -import org.apache.log4j.FileAppender;
    -import org.apache.log4j.LogManager;
    -import org.apache.log4j.varia.NullAppender;
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.Logger;
    +import org.apache.logging.log4j.core.Appender;
    +import org.apache.logging.log4j.core.appender.FileAppender;
    +import org.apache.logging.log4j.core.appender.RollingFileAppender;

      /**
       * ExecDriver is the central class in co-ordinating execution of any map-reduce task.
    @@ -687,8 +688,10 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop
          if (noLog) {
            // If started from main(), and noLog is on, we should not output
            // any logs. To turn the log on, please set -Dtest.silent=false
    - BasicConfigurator.resetConfiguration();
    - BasicConfigurator.configure(new NullAppender());
    + Logger logger = org.apache.logging.log4j.LogManager.getRootLogger();
    + NullAppender appender = NullAppender.createNullAppender();
    + appender.addToLogger(logger.getName(), Level.ERROR);
    + appender.start();
          } else {
            setupChildLog4j(conf);
          }
    @@ -703,10 +706,12 @@ public class ExecDriver extends Task<MapredWork> implements Serializable, Hadoop

          // print out the location of the log file for the user so
          // that it's easy to find reason for local mode execution failures
    - for (Appender appender : Collections.list((Enumeration<Appender>) LogManager.getRootLogger()
    - .getAllAppenders())) {
    + for (Appender appender : ((org.apache.logging.log4j.core.Logger) LogManager.getRootLogger())
    + .getAppenders().values()) {
            if (appender instanceof FileAppender) {
    - console.printInfo("Execution log at: " + ((FileAppender) appender).getFile());
    + console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
    + } else if (appender instanceof RollingFileAppender) {
    + console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
            }
          }
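
    Log4j 1.x exposed root appenders through getAllAppenders(); the Log4j2 core Logger instead
    returns a name-to-Appender map, which is what this hunk (and the HadoopJobExecHelper and
    PartialScanTask hunks below) iterates. A hedged standalone version of that lookup, printing
    to stdout instead of Hive's console (class name is illustrative):

    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.core.Appender;
    import org.apache.logging.log4j.core.Logger;
    import org.apache.logging.log4j.core.appender.FileAppender;
    import org.apache.logging.log4j.core.appender.RollingFileAppender;

    public final class ExecutionLogLocatorSketch {
      public static void printExecutionLogs() {
        Logger root = (Logger) LogManager.getRootLogger();
        for (Appender appender : root.getAppenders().values()) {
          if (appender instanceof FileAppender) {
            System.out.println("Execution log at: " + ((FileAppender) appender).getFileName());
          } else if (appender instanceof RollingFileAppender) {
            System.out.println("Execution log at: " + ((RollingFileAppender) appender).getFileName());
          }
        }
      }
    }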


    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
    index 6a6593c..44dfe3e 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java
    @@ -24,7 +24,6 @@ import java.text.SimpleDateFormat;
      import java.util.ArrayList;
      import java.util.Calendar;
      import java.util.Collections;
    -import java.util.Enumeration;
      import java.util.HashMap;
      import java.util.LinkedList;
      import java.util.List;
    @@ -33,8 +32,6 @@ import java.util.concurrent.TimeUnit;

      import org.apache.commons.logging.Log;
      import org.apache.commons.logging.LogFactory;
    -import org.apache.hadoop.fs.Path;
    -import org.apache.hadoop.hive.common.JavaUtils;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
      import org.apache.hadoop.hive.ql.MapRedStats;
    @@ -59,9 +56,11 @@ import org.apache.hadoop.mapred.JobStatus;
      import org.apache.hadoop.mapred.RunningJob;
      import org.apache.hadoop.mapred.TaskCompletionEvent;
      import org.apache.hadoop.mapred.TaskReport;
    -import org.apache.log4j.Appender;
    -import org.apache.log4j.FileAppender;
    -import org.apache.log4j.LogManager;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.Appender;
    +import org.apache.logging.log4j.core.Logger;
    +import org.apache.logging.log4j.core.appender.FileAppender;
    +import org.apache.logging.log4j.core.appender.RollingFileAppender;

      public class HadoopJobExecHelper {

    @@ -492,10 +491,11 @@ public class HadoopJobExecHelper {
          sb.append("Logs:\n");
          console.printError(sb.toString());

    - for (Appender a : Collections.list((Enumeration<Appender>)
    - LogManager.getRootLogger().getAllAppenders())) {
    - if (a instanceof FileAppender) {
    - console.printError((new Path(((FileAppender)a).getFile())).toUri().getPath());
    + for (Appender appender : ((Logger) LogManager.getRootLogger()).getAppenders().values()) {
    + if (appender instanceof FileAppender) {
    + console.printError(((FileAppender) appender).getFileName());
    + } else if (appender instanceof RollingFileAppender) {
    + console.printError(((RollingFileAppender) appender).getFileName());
            }
          }
        }

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
    index 3cb9e9c..cee0878 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/stats/PartialScanTask.java
    @@ -21,8 +21,6 @@ package org.apache.hadoop.hive.ql.io.rcfile.stats;
      import java.io.IOException;
      import java.io.Serializable;
      import java.util.ArrayList;
    -import java.util.Collections;
    -import java.util.Enumeration;
      import java.util.List;

      import org.apache.commons.lang.StringUtils;
    @@ -59,9 +57,11 @@ import org.apache.hadoop.mapred.InputFormat;
      import org.apache.hadoop.mapred.JobClient;
      import org.apache.hadoop.mapred.JobConf;
      import org.apache.hadoop.mapred.RunningJob;
    -import org.apache.log4j.Appender;
    -import org.apache.log4j.FileAppender;
    -import org.apache.log4j.LogManager;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.Appender;
    +import org.apache.logging.log4j.core.Logger;
    +import org.apache.logging.log4j.core.appender.FileAppender;
    +import org.apache.logging.log4j.core.appender.RollingFileAppender;

      /**
       * PartialScanTask.
    @@ -335,15 +335,15 @@ public class PartialScanTask extends Task<PartialScanWork> implements

          // print out the location of the log file for the user so
          // that it's easy to find reason for local mode execution failures
    - for (Appender appender : Collections
    - .list((Enumeration<Appender>) LogManager.getRootLogger()
    - .getAllAppenders())) {
    + for (Appender appender : ((Logger) LogManager.getRootLogger()).getAppenders().values()) {
            if (appender instanceof FileAppender) {
    - console.printInfo("Execution log at: "
    - + ((FileAppender) appender).getFile());
    + console.printInfo("Execution log at: " + ((FileAppender) appender).getFileName());
    + } else if (appender instanceof RollingFileAppender) {
    + console.printInfo("Execution log at: " + ((RollingFileAppender) appender).getFileName());
            }
          }

    +
          PartialScanWork mergeWork = new PartialScanWork(inputPaths);
          DriverContext driverCxt = new DriverContext();
          PartialScanTask taskExec = new PartialScanTask();

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java b/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java
    new file mode 100644
    index 0000000..46662c4
    --- /dev/null
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/log/HiveEventCounter.java
    @@ -0,0 +1,135 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.log;
    +
    +import java.io.Serializable;
    +import java.util.concurrent.atomic.AtomicLongArray;
    +
    +import org.apache.hadoop.hive.common.classification.InterfaceAudience;
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.Filter;
    +import org.apache.logging.log4j.core.Layout;
    +import org.apache.logging.log4j.core.LogEvent;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.appender.AbstractAppender;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.config.LoggerConfig;
    +import org.apache.logging.log4j.core.config.plugins.Plugin;
    +import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
    +import org.apache.logging.log4j.core.config.plugins.PluginElement;
    +import org.apache.logging.log4j.core.config.plugins.PluginFactory;
    +import org.apache.logging.log4j.core.layout.PatternLayout;
    +
    +import com.google.common.annotations.VisibleForTesting;
    +
    +/**
    + * A log4J2 Appender that simply counts logging events in four levels:
    + * fatal, error, warn and info. The class name is used in log4j2.xml
    + */
    +@Plugin(name = "HiveEventCounter", category = "Core", elementType = "appender", printObject = true)
    +public class HiveEventCounter extends AbstractAppender {
    + private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
    + private static Configuration configuration = context.getConfiguration();
    + private static final String APPENDER_NAME = "HiveEventCounter";
    + private static final int FATAL = 0;
    + private static final int ERROR = 1;
    + private static final int WARN = 2;
    + private static final int INFO = 3;
    +
    + private static class EventCounts {
    + private final AtomicLongArray counts = new AtomicLongArray(4);
    +
    + private void incr(int i) {
    + counts.incrementAndGet(i);
    + }
    +
    + private long get(int i) {
    + return counts.get(i);
    + }
    + }
    +
    + private static EventCounts counts = new EventCounts();
    +
    + protected HiveEventCounter(String name, Filter filter,
    + Layout<? extends Serializable> layout, boolean ignoreExceptions) {
    + super(name, filter, layout, ignoreExceptions);
    + }
    +
    + @PluginFactory
    + public static HiveEventCounter createInstance(@PluginAttribute("name") String name,
    + @PluginAttribute("ignoreExceptions") boolean ignoreExceptions,
    + @PluginElement("Layout") Layout layout,
    + @PluginElement("Filters") Filter filter) {
    + if (name == null) {
    + name = APPENDER_NAME;
    + }
    +
    + if (layout == null) {
    + layout = PatternLayout.createDefaultLayout();
    + }
    + return new HiveEventCounter(name, filter, layout, ignoreExceptions);
    + }
    +
    + @InterfaceAudience.Private
    + public static long getFatal() {
    + return counts.get(FATAL);
    + }
    +
    + @InterfaceAudience.Private
    + public static long getError() {
    + return counts.get(ERROR);
    + }
    +
    + @InterfaceAudience.Private
    + public static long getWarn() {
    + return counts.get(WARN);
    + }
    +
    + @InterfaceAudience.Private
    + public static long getInfo() {
    + return counts.get(INFO);
    + }
    +
    + @VisibleForTesting
    + public void addToLogger(String loggerName, Level level) {
    + LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
    + loggerConfig.addAppender(this, level, null);
    + context.updateLoggers();
    + }
    +
    + @VisibleForTesting
    + public void removeFromLogger(String loggerName) {
    + LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
    + loggerConfig.removeAppender(APPENDER_NAME);
    + context.updateLoggers();
    + }
    +
    + public void append(LogEvent event) {
    + Level level = event.getLevel();
    + if (level.equals(Level.INFO)) {
    + counts.incr(INFO);
    + } else if (level.equals(Level.WARN)) {
    + counts.incr(WARN);
    + } else if (level.equals(Level.ERROR)) {
    + counts.incr(ERROR);
    + } else if (level.equals(Level.FATAL)) {
    + counts.incr(FATAL);
    + }
    + }
    +}
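
    A hedged usage sketch for the plugin above: attach it to the root logger via the
    @VisibleForTesting helper, emit a couple of events, and read the counters back (the demo
    class and log messages are illustrative):

    import org.apache.hadoop.hive.ql.log.HiveEventCounter;
    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;
    import org.apache.logging.log4j.Logger;
    import org.apache.logging.log4j.core.layout.PatternLayout;

    public final class HiveEventCounterDemo {
      public static void main(String[] args) {
        HiveEventCounter counter = HiveEventCounter.createInstance(
            "HiveEventCounter", true, PatternLayout.createDefaultLayout(), null);
        counter.start();
        counter.addToLogger(LogManager.ROOT_LOGGER_NAME, Level.INFO);

        Logger log = LogManager.getLogger(HiveEventCounterDemo.class);
        log.info("hello");    // counted under INFO
        log.warn("careful");  // counted under WARN

        System.out.println("info events: " + HiveEventCounter.getInfo());
        System.out.println("warn events: " + HiveEventCounter.getWarn());
        counter.removeFromLogger(LogManager.ROOT_LOGGER_NAME);
      }
    }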

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
    new file mode 100644
    index 0000000..c4cb7dd
    --- /dev/null
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/log/NullAppender.java
    @@ -0,0 +1,63 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.log;
    +
    +import java.io.Serializable;
    +
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.Filter;
    +import org.apache.logging.log4j.core.Layout;
    +import org.apache.logging.log4j.core.LogEvent;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.appender.AbstractAppender;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.config.LoggerConfig;
    +import org.apache.logging.log4j.core.config.plugins.Plugin;
    +import org.apache.logging.log4j.core.config.plugins.PluginFactory;
    +import org.apache.logging.log4j.core.layout.PatternLayout;
    +
    +/**
    + * A NullAppender merely exists, it never outputs a message to any device.
    + */
    +@Plugin(name = "NullAppender", category = "Core", elementType = "appender", printObject = false)
    +public class NullAppender extends AbstractAppender {
    +
    + private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
    + private static Configuration configuration = context.getConfiguration();
    +
    + protected NullAppender(String name, Filter filter,
    + Layout<? extends Serializable> layout, boolean ignoreExceptions) {
    + super(name, filter, layout, ignoreExceptions);
    + }
    +
    + @PluginFactory
    + public static NullAppender createNullAppender() {
    + return new NullAppender("NullAppender", null, PatternLayout.createDefaultLayout(), true);
    + }
    +
    + public void addToLogger(String loggerName, Level level) {
    + LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
    + loggerConfig.addAppender(this, level, null);
    + context.updateLoggers();
    + }
    +
    + public void append(LogEvent event) {
    + // no-op
    + }
    +}
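
    The ExecDriver hunk earlier in this commit shows the intended use. As a standalone sketch
    (note that this only adds the no-op appender; it does not detach appenders that are already
    configured):

    import org.apache.hadoop.hive.ql.log.NullAppender;
    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.LogManager;

    public final class NullAppenderSketch {
      public static void main(String[] args) {
        NullAppender appender = NullAppender.createNullAppender();
        // Attach the no-op appender to the root logger config at ERROR,
        // mirroring the noLog branch of ExecDriver.main in this commit.
        appender.addToLogger(LogManager.getRootLogger().getName(), Level.ERROR);
        appender.start();
        LogManager.getLogger(NullAppenderSketch.class)
            .error("delivered to NullAppender, and to any other appenders still attached");
      }
    }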

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java b/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java
    deleted file mode 100644
    index 6a59d4a..0000000
    --- a/ql/src/java/org/apache/hadoop/hive/ql/log/PidDailyRollingFileAppender.java
    +++ /dev/null
    @@ -1,33 +0,0 @@
    -/**
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements. See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership. The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License. You may obtain a copy of the License at
    - *
    - * http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - */
    -
    -package org.apache.hadoop.hive.ql.log;
    -
    -import java.lang.management.ManagementFactory;
    -import java.lang.management.RuntimeMXBean;
    -
    -import org.apache.log4j.DailyRollingFileAppender;
    -
    -public class PidDailyRollingFileAppender extends DailyRollingFileAppender {
    -
    - @Override
    - public void setFile(String file) {
    - RuntimeMXBean rt = ManagementFactory.getRuntimeMXBean();
    - super.setFile(file + '.' + rt.getName());
    - }
    -}

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
    new file mode 100644
    index 0000000..4db10bb
    --- /dev/null
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/log/PidFilePatternConverter.java
    @@ -0,0 +1,62 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.log;
    +
    +import java.lang.management.ManagementFactory;
    +
    +import org.apache.logging.log4j.core.config.plugins.Plugin;
    +import org.apache.logging.log4j.core.config.plugins.PluginFactory;
    +import org.apache.logging.log4j.core.pattern.AbstractPatternConverter;
    +import org.apache.logging.log4j.core.pattern.ArrayPatternConverter;
    +import org.apache.logging.log4j.core.pattern.ConverterKeys;
    +
    +/**
    + * FilePattern converter that converts %pid pattern to <process-id>@<hostname> information
    + * obtained at runtime.
    + *
    + * Example usage:
    + * <RollingFile name="Rolling-default" fileName="test.log" filePattern="test.log.%pid.gz">
    + *
    + * Will generate output file with name containing <process-id>@<hostname> like below
    + * test.log.95232@localhost.gz
    + */
    +@Plugin(name = "PidFilePatternConverter", category = "FileConverter")
    +@ConverterKeys({ "pid" })
    +public class PidFilePatternConverter extends AbstractPatternConverter implements
    + ArrayPatternConverter {
    +
    + /**
    + * Private constructor.
    + */
    + private PidFilePatternConverter() {
    + super("pid", "pid");
    + }
    +
    + @PluginFactory
    + public static PidFilePatternConverter newInstance() {
    + return new PidFilePatternConverter();
    + }
    +
    + public void format(StringBuilder toAppendTo, Object... objects) {
    + toAppendTo.append(ManagementFactory.getRuntimeMXBean().getName());
    + }
    +
    + public void format(Object obj, StringBuilder toAppendTo) {
    + toAppendTo.append(ManagementFactory.getRuntimeMXBean().getName());
    + }
    +}
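
    For reference, the <process-id>@<hostname> token that this converter appends is simply the JVM
    runtime name. A minimal, hedged sketch (the PidDemo class is illustrative and not part of this
    patch) that prints the same value the %pid pattern would resolve to:

        import java.lang.management.ManagementFactory;

        public class PidDemo {
          public static void main(String[] args) {
            // RuntimeMXBean#getName() typically returns "<pid>@<hostname>", e.g. "95232@localhost",
            // which is the token PidFilePatternConverter appends to the file pattern.
            System.out.println(ManagementFactory.getRuntimeMXBean().getName());
          }
        }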

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/main/resources/hive-exec-log4j.properties
    ----------------------------------------------------------------------
    diff --git a/ql/src/main/resources/hive-exec-log4j.properties b/ql/src/main/resources/hive-exec-log4j.properties
    deleted file mode 100644
    index 9eaa6b6..0000000
    --- a/ql/src/main/resources/hive-exec-log4j.properties
    +++ /dev/null
    @@ -1,77 +0,0 @@
    -# Licensed to the Apache Software Foundation (ASF) under one
    -# or more contributor license agreements. See the NOTICE file
    -# distributed with this work for additional information
    -# regarding copyright ownership. The ASF licenses this file
    -# to you under the Apache License, Version 2.0 (the
    -# "License"); you may not use this file except in compliance
    -# with the License. You may obtain a copy of the License at
    -#
    -# http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -
    -# Define some default values that can be overridden by system properties
    -hive.log.threshold=ALL
    -hive.root.logger=INFO,FA
    -hive.log.dir=${java.io.tmpdir}/${user.name}
    -hive.query.id=hadoop
    -hive.log.file=${hive.query.id}.log
    -
    -# Define the root logger to the system property "hadoop.root.logger".
    -log4j.rootLogger=${hive.root.logger}, EventCounter
    -
    -# Logging Threshold
    -log4j.threshhold=${hive.log.threshold}
    -
    -#
    -# File Appender
    -#
    -
    -log4j.appender.FA=org.apache.log4j.FileAppender
    -log4j.appender.FA.File=${hive.log.dir}/${hive.log.file}
    -log4j.appender.FA.layout=org.apache.log4j.PatternLayout
    -
    -# Pattern format: Date LogLevel LoggerName LogMessage
    -#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
    -# Debugging Pattern format
    -log4j.appender.FA.layout.ConversionPattern=%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n
    -
    -
    -#
    -# console
    -# Add "console" to rootlogger above if you want to use this
    -#
    -
    -log4j.appender.console=org.apache.log4j.ConsoleAppender
    -log4j.appender.console.target=System.err
    -log4j.appender.console.layout=org.apache.log4j.PatternLayout
    -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} [%t] %p %c{2}: %m%n
    -
    -#custom logging levels
    -#log4j.logger.xxx=DEBUG
    -
    -#
    -# Event Counter Appender
    -# Sends counts of logging messages at different severity levels to Hadoop Metrics.
    -#
    -log4j.appender.EventCounter=org.apache.hadoop.hive.shims.HiveEventCounter
    -
    -
    -log4j.category.DataNucleus=ERROR,FA
    -log4j.category.Datastore=ERROR,FA
    -log4j.category.Datastore.Schema=ERROR,FA
    -log4j.category.JPOX.Datastore=ERROR,FA
    -log4j.category.JPOX.Plugin=ERROR,FA
    -log4j.category.JPOX.MetaData=ERROR,FA
    -log4j.category.JPOX.Query=ERROR,FA
    -log4j.category.JPOX.General=ERROR,FA
    -log4j.category.JPOX.Enhancer=ERROR,FA
    -
    -
    -# Silence useless ZK logs
    -log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN,FA
    -log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN,FA

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/main/resources/hive-exec-log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/ql/src/main/resources/hive-exec-log4j2.xml b/ql/src/main/resources/hive-exec-log4j2.xml
    new file mode 100644
    index 0000000..c93437c
    --- /dev/null
    +++ b/ql/src/main/resources/hive-exec-log4j2.xml
    @@ -0,0 +1,110 @@
    +<?xml version="1.0" encoding="UTF-8"?>
    +<!--
    + Licensed to the Apache Software Foundation (ASF) under one or more
    + contributor license agreements. See the NOTICE file distributed with
    + this work for additional information regarding copyright ownership.
    + The ASF licenses this file to You under the Apache License, Version 2.0
    + (the "License"); you may not use this file except in compliance with
    + the License. You may obtain a copy of the License at
    +
    + http://www.apache.org/licenses/LICENSE-2.0
    +
    + Unless required by applicable law or agreed to in writing, software
    + distributed under the License is distributed on an "AS IS" BASIS,
    + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + See the License for the specific language governing permissions and
    + limitations under the License.
    +-->
    +
    +<Configuration status="info" strict="true" name="HiveExecLog4j2"
    + packages="org.apache.hadoop.hive.ql.log">
    +
    + <Properties>
    + <Property name="hive.log.threshold">ALL</Property>
    + <Property name="hive.log.level">INFO</Property>
    + <Property name="hive.root.logger">FA</Property>
    + <Property name="hive.log.dir">${sys:java.io.tmpdir}/${sys:user.name}</Property>
    + <Property name="hive.query.id">hadoop</Property>
    + <Property name="hive.log.file">${sys:hive.query.id}.log</Property>
    + </Properties>
    +
    + <Appenders>
    + <Console name="console" target="SYSTEM_ERR">
    + <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} [%t]: %p %c{2}: %m%n"/>
    + </Console>
    +
    + <!-- Regular File Appender -->
    + <!-- NOTE: if enabling multiple file appenders, make sure to use different file names -->
    + <File name="FA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}">
    + <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
    + </File>
    +
    + <!-- Daily Rolling File Appender -->
    + <!-- NOTE: if enabling multiple file appenders, make sure to use different file names -->
    + <!-- Use %pid in the filePattern to append <process-id>@<host-name> to the filename if you want separate log files for different CLI sessions -->
    + <!-- <RollingFile name="DRFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
    + filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%d{yyyy-MM-dd}">
    + <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
    + <Policies>
    + <TimeBasedTriggeringPolicy interval="1" modulate="true"/>
    + </Policies>
    + <DefaultRolloverStrategy max="30"/>
    + </RollingFile> -->
    +
    + <!-- Size based Rolling File Appender -->
    + <!-- NOTE: if enabling multiple file appenders, make sure to use different file names -->
    + <!-- <RollingFile name="RFA" fileName="${sys:hive.log.dir}/${sys:hive.log.file}"
    + filePattern="${sys:hive.log.dir}/${sys:hive.log.file}.%i">
    + <PatternLayout pattern="%d{ISO8601} %-5p [%t]: %c{2} (%F:%M(%L)) - %m%n" />
    + <Policies>
    + <SizeBasedTriggeringPolicy size="256 MB" />
    + </Policies>
    + <DefaultRolloverStrategy max="10"/>
    + </RollingFile> -->
    +
    + <!-- The HiveEventCounter appender is loaded through the configuration's packages attribute. It sends counts of logging messages at different severity levels to Hadoop Metrics. -->
    + <HiveEventCounter name="EventCounter"/>
    + </Appenders>
    +
    + <Loggers>
    + <Root level="${sys:hive.log.threshold}">
    + <AppenderRef ref="${sys:hive.root.logger}" level="${sys:hive.log.level}"/>
    + <AppenderRef ref="EventCounter" />
    + </Root>
    +
    + <Logger name="org.apache.zookeeper.server.NIOServerCnxn" level="WARN">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="org.apache.zookeeper.ClientCnxnSocketNIO" level="WARN">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="DataNucleus" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="Datastore" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="Datastore.Schema" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="JPOX.Datastore" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="JPOX.Plugin" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="JPOX.MetaData" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="JPOX.Query" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="JPOX.General" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + <Logger name="JPOX.Enhancer" level="ERROR">
    + <AppenderRef ref="${sys:hive.root.logger}"/>
    + </Logger>
    + </Loggers>
    +
    +</Configuration>
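
    The appenders above resolve hive.log.dir, hive.query.id and hive.log.file through ${sys:...}
    lookups, i.e. JVM system properties (the <Properties> section lists their intended defaults);
    Hive's launch scripts normally pass them as -D options. A minimal, hedged sketch (the class
    name is illustrative and not part of this patch) of setting them programmatically before the
    logging context is first touched:

        public class HiveExecLogSetup {
          public static void main(String[] args) {
            // Must run before the first LogManager/LoggerContext access, otherwise the
            // values from the <Properties> section of hive-exec-log4j2.xml apply.
            System.setProperty("hive.log.dir", "/tmp/hive-logs");      // feeds ${sys:hive.log.dir}
            System.setProperty("hive.query.id", "query_0001");         // feeds ${sys:hive.query.id}
            System.setProperty("hive.log.file", "query_0001.log");     // feeds ${sys:hive.log.file}
            org.apache.logging.log4j.LogManager.getLogger(HiveExecLogSetup.class)
                .info("file appender now writes to /tmp/hive-logs/query_0001.log");
          }
        }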

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java b/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java
    new file mode 100644
    index 0000000..bdd837e
    --- /dev/null
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/log/TestLog4j2Appenders.java
    @@ -0,0 +1,95 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.log;
    +
    +import static org.junit.Assert.assertEquals;
    +
    +import org.apache.hadoop.hive.ql.metadata.StringAppender;
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.Logger;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.config.LoggerConfig;
    +import org.junit.Before;
    +import org.junit.Test;
    +
    +/**
    + * Unit tests for the log4j2 appenders used by Hive (StringAppender and HiveEventCounter).
    + */
    +public class TestLog4j2Appenders {
    +
    + @Before
    + public void setup() {
    + // Programmatically set the root logger level to INFO. By default, if log4j2-test.xml is not
    + // available, the root logger will use the ERROR log level.
    + LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    + Configuration config = ctx.getConfiguration();
    + LoggerConfig loggerConfig = config.getLoggerConfig(LogManager.ROOT_LOGGER_NAME);
    + loggerConfig.setLevel(Level.INFO);
    + ctx.updateLoggers();
    + }
    +
    + @Test
    + public void testStringAppender() throws Exception {
    + // Get the RootLogger which, if you don't have log4j2-test.xml defined, will only log ERRORs
    + Logger logger = LogManager.getRootLogger();
    + // Create a String Appender to capture log output
    + StringAppender appender = StringAppender.createStringAppender("%m");
    + appender.addToLogger(logger.getName(), Level.INFO);
    + appender.start();
    +
    + // Log to the string appender
    + logger.info("Hello!");
    + logger.info(" World");
    +
    + assertEquals("Hello! World", appender.getOutput());
    + appender.removeFromLogger(LogManager.getRootLogger().getName());
    + }
    +
    + @Test
    + public void testHiveEventCounterAppender() throws Exception {
    + Logger logger = LogManager.getRootLogger();
    + HiveEventCounter appender = HiveEventCounter.createInstance("EventCounter", true, null, null);
    + appender.addToLogger(logger.getName(), Level.INFO);
    + appender.start();
    +
    + logger.info("Test");
    + logger.info("Test");
    + logger.info("Test");
    + logger.info("Test");
    +
    + logger.error("Test");
    + logger.error("Test");
    + logger.error("Test");
    +
    + logger.warn("Test");
    + logger.warn("Test");
    +
    + logger.fatal("Test");
    +
    + // HiveEventCounter will be loaded from hive-log4j2-test.xml before tests are run. The 2 log
    + // info msgs from previous test case will also be counted along with 4 log info msgs in this
    + // test and hence we assert for 6 here
    + assertEquals(6, appender.getInfo());
    + assertEquals(3, appender.getError());
    + assertEquals(2, appender.getWarn());
    + assertEquals(1, appender.getFatal());
    + appender.removeFromLogger(LogManager.getRootLogger().getName());
    + }
    +}

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java
    new file mode 100644
    index 0000000..17b64d6
    --- /dev/null
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/StringAppender.java
    @@ -0,0 +1,128 @@
    +/**
    + * Licensed to the Apache Software Foundation (ASF) under one
    + * or more contributor license agreements. See the NOTICE file
    + * distributed with this work for additional information
    + * regarding copyright ownership. The ASF licenses this file
    + * to you under the Apache License, Version 2.0 (the
    + * "License"); you may not use this file except in compliance
    + * with the License. You may obtain a copy of the License at
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +package org.apache.hadoop.hive.ql.metadata;
    +
    +import java.io.ByteArrayOutputStream;
    +import java.io.OutputStream;
    +import java.io.OutputStreamWriter;
    +import java.io.Serializable;
    +
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.Filter;
    +import org.apache.logging.log4j.core.Layout;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
    +import org.apache.logging.log4j.core.appender.OutputStreamManager;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.config.LoggerConfig;
    +import org.apache.logging.log4j.core.config.plugins.Plugin;
    +import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
    +import org.apache.logging.log4j.core.config.plugins.PluginFactory;
    +import org.apache.logging.log4j.core.layout.PatternLayout;
    +
    +import com.google.common.annotations.VisibleForTesting;
    +
    +/**
    + * Log4j2 appender that writes to an in-memory string object.
    + */
    +@Plugin(name = "StringAppender", category = "Core", elementType = "appender", printObject = true)
    +public class StringAppender
    + extends AbstractOutputStreamAppender<StringAppender.StringOutputStreamManager> {
    +
    + private static final String APPENDER_NAME = "StringAppender";
    + private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
    + private static Configuration configuration = context.getConfiguration();
    + private StringOutputStreamManager manager;
    +
    + /**
    + * Instantiate a StringAppender that writes formatted log events to the
    + * given {@link StringOutputStreamManager}.
    + *
    + * @param name The name of the Appender.
    + * @param layout The layout to format the message.
    + * @param filter
    + * @param ignoreExceptions
    + * @param immediateFlush
    + * @param manager The OutputStreamManager.
    + */
    + protected StringAppender(String name,
    + Layout<? extends Serializable> layout, Filter filter,
    + boolean ignoreExceptions, boolean immediateFlush,
    + StringOutputStreamManager manager) {
    + super(name, layout, filter, ignoreExceptions, immediateFlush, manager);
    + this.manager = manager;
    + }
    +
    + @PluginFactory
    + public static StringAppender createStringAppender(
    + @PluginAttribute("name") String nullablePatternString) {
    + PatternLayout layout;
    + if (nullablePatternString == null) {
    + layout = PatternLayout.createDefaultLayout();
    + } else {
    + layout = PatternLayout.createLayout(nullablePatternString, configuration,
    + null, null, true, false, null, null);
    + }
    +
    + return new StringAppender(APPENDER_NAME, layout, null, false, true,
    + new StringOutputStreamManager(new ByteArrayOutputStream(), "StringStream", layout));
    + }
    +
    + @VisibleForTesting
    + public void addToLogger(String loggerName, Level level) {
    + LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
    + loggerConfig.addAppender(this, level, null);
    + context.updateLoggers();
    + }
    +
    + @VisibleForTesting
    + public void removeFromLogger(String loggerName) {
    + LoggerConfig loggerConfig = configuration.getLoggerConfig(loggerName);
    + loggerConfig.removeAppender(APPENDER_NAME);
    + context.updateLoggers();
    + }
    +
    + public String getOutput() {
    + manager.flush();
    + return new String(manager.getStream().toByteArray());
    + }
    +
    + public void reset() {
    + manager.reset();
    + }
    +
    + protected static class StringOutputStreamManager extends OutputStreamManager {
    + ByteArrayOutputStream stream;
    +
    + protected StringOutputStreamManager(ByteArrayOutputStream os, String streamName,
    + Layout<?> layout) {
    + super(os, streamName, layout);
    + stream = os;
    + }
    +
    + public ByteArrayOutputStream getStream() {
    + return stream;
    + }
    +
    + public void reset() {
    + stream.reset();
    + }
    + }
    +}

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
    index 99fbd5d..1e2feaa 100755
    --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java
    @@ -20,7 +20,6 @@ package org.apache.hadoop.hive.ql.metadata;

      import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;

    -import java.io.StringWriter;
      import java.util.ArrayList;
      import java.util.Arrays;
      import java.util.HashMap;
    @@ -29,8 +28,6 @@ import java.util.List;
      import java.util.Map;
      import java.util.regex.Pattern;

    -import junit.framework.TestCase;
    -
      import org.apache.hadoop.fs.FileStatus;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;
    @@ -56,15 +53,19 @@ import org.apache.hadoop.mapred.SequenceFileInputFormat;
      import org.apache.hadoop.mapred.SequenceFileOutputFormat;
      import org.apache.hadoop.mapred.TextInputFormat;
      import org.apache.hadoop.util.StringUtils;
    -import org.apache.log4j.Level;
    -import org.apache.log4j.Logger;
    -import org.apache.log4j.PatternLayout;
    -import org.apache.log4j.WriterAppender;
    +import org.apache.logging.log4j.Level;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.Logger;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.config.LoggerConfig;
      import org.apache.thrift.protocol.TBinaryProtocol;
      import org.junit.Assert;

      import com.google.common.collect.ImmutableMap;

    +import junit.framework.TestCase;
    +
      /**
       * TestHive.
       *
    @@ -248,36 +249,39 @@ public class TestHive extends TestCase {
         * @throws Throwable
         */
        public void testMetaStoreApiTiming() throws Throwable {
    - // set log level to DEBUG, as this is logged at debug level
    - Logger logger = Logger.getLogger("hive.ql.metadata.Hive");
    - Level origLevel = logger.getLevel();
    - logger.setLevel(Level.DEBUG);
    -
    - // create an appender to capture the logs in a string
    - StringWriter writer = new StringWriter();
    - WriterAppender appender = new WriterAppender(new PatternLayout(), writer);
    + // Get the Hive metadata logger and raise its level to DEBUG, as metastore API timing is logged at DEBUG
    + Logger logger = LogManager.getLogger("hive.ql.metadata.Hive");
    + Level oldLevel = logger.getLevel();
    + LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
    + Configuration config = ctx.getConfiguration();
    + LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
    + loggerConfig.setLevel(Level.DEBUG);
    + ctx.updateLoggers();
    +
    + // Create a String Appender to capture log output
    + StringAppender appender = StringAppender.createStringAppender("%m");
    + appender.addToLogger(logger.getName(), Level.DEBUG);
    + appender.start();

          try {
    - logger.addAppender(appender);
    -
            hm.clearMetaCallTiming();
            hm.getAllDatabases();
            hm.dumpAndClearMetaCallTiming("test");
    - String logStr = writer.toString();
    + String logStr = appender.getOutput();
            String expectedString = "getAllDatabases_()=";
            Assert.assertTrue(logStr + " should contain <" + expectedString,
                logStr.contains(expectedString));

            // reset the log buffer, verify new dump without any api call does not contain func
    - writer.getBuffer().setLength(0);
    + appender.reset();
            hm.dumpAndClearMetaCallTiming("test");
    - logStr = writer.toString();
    + logStr = appender.getOutput();
            Assert.assertFalse(logStr + " should not contain <" + expectedString,
                logStr.contains(expectedString));
    -
          } finally {
    - logger.setLevel(origLevel);
    - logger.removeAppender(appender);
    + loggerConfig.setLevel(oldLevel);
    + ctx.updateLoggers();
    + appender.removeFromLogger(logger.getName());
          }
        }


    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
    ----------------------------------------------------------------------
    diff --git a/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java b/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
    index 9d64b10..876ade8 100644
    --- a/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
    +++ b/service/src/java/org/apache/hive/service/cli/CLIServiceUtils.java
    @@ -18,9 +18,6 @@

      package org.apache.hive.service.cli;

    -import org.apache.log4j.Layout;
    -import org.apache.log4j.PatternLayout;
    -
      /**
       * CLIServiceUtils.
       *
    @@ -29,10 +26,6 @@ public class CLIServiceUtils {


        private static final char SEARCH_STRING_ESCAPE = '\\';
    - public static final Layout verboseLayout = new PatternLayout(
    - "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n");
    - public static final Layout nonVerboseLayout = new PatternLayout(
    - "%-5p : %m%n");

        /**
         * Convert a SQL search pattern into an equivalent Java Regex.

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
    ----------------------------------------------------------------------
    diff --git a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
    index 70340bd..fb3921f 100644
    --- a/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
    +++ b/service/src/java/org/apache/hive/service/cli/operation/LogDivertAppender.java
    @@ -6,44 +6,94 @@
       * to you under the Apache License, Version 2.0 (the
       * "License"); you may not use this file except in compliance
       * with the License. You may obtain a copy of the License at
    - *
    - * http://www.apache.org/licenses/LICENSE-2.0
    - *
    + * <p/>
    + * http://www.apache.org/licenses/LICENSE-2.0
    + * <p/>
       * Unless required by applicable law or agreed to in writing, software
       * distributed under the License is distributed on an "AS IS" BASIS,
       * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
       * See the License for the specific language governing permissions and
       * limitations under the License.
       */
    -
      package org.apache.hive.service.cli.operation;
    -import java.io.CharArrayWriter;
    -import java.util.Enumeration;
    +
    +import java.io.ByteArrayOutputStream;
    +import java.io.OutputStream;
    +import java.io.OutputStreamWriter;
    +import java.io.Serializable;
      import java.util.regex.Pattern;

      import org.apache.hadoop.hive.ql.exec.Task;
      import org.apache.hadoop.hive.ql.log.PerfLogger;
      import org.apache.hadoop.hive.ql.session.OperationLog;
    -import org.apache.hadoop.hive.ql.session.OperationLog.LoggingLevel;
    -import org.apache.hive.service.cli.CLIServiceUtils;
    -import org.apache.log4j.Appender;
    -import org.apache.log4j.ConsoleAppender;
    -import org.apache.log4j.Layout;
    -import org.apache.log4j.Logger;
    -import org.apache.log4j.WriterAppender;
    -import org.apache.log4j.spi.Filter;
    -import org.apache.log4j.spi.LoggingEvent;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.Logger;
    +import org.apache.logging.log4j.core.Appender;
    +import org.apache.logging.log4j.core.Filter;
    +import org.apache.logging.log4j.core.Layout;
    +import org.apache.logging.log4j.core.LogEvent;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.appender.AbstractOutputStreamAppender;
    +import org.apache.logging.log4j.core.appender.ConsoleAppender;
    +import org.apache.logging.log4j.core.appender.OutputStreamManager;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.filter.AbstractFilter;
    +import org.apache.logging.log4j.core.layout.PatternLayout;

      import com.google.common.base.Joiner;

      /**
    - * An Appender to divert logs from individual threads to the LogObject they belong to.
    + * Divert appender to redirect operation logs to separate files.
       */
    -public class LogDivertAppender extends WriterAppender {
    - private static final Logger LOG = Logger.getLogger(LogDivertAppender.class.getName());
    +public class LogDivertAppender
    + extends AbstractOutputStreamAppender<LogDivertAppender.StringOutputStreamManager> {
    + private static final Logger LOG = LogManager.getLogger(LogDivertAppender.class.getName());
    + private static LoggerContext context = (LoggerContext) LogManager.getContext(false);
    + private static Configuration configuration = context.getConfiguration();
    + public static final Layout<? extends Serializable> verboseLayout = PatternLayout.createLayout(
    + "%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n", configuration, null, null, true, false, null, null);
    + public static final Layout<? extends Serializable> nonVerboseLayout = PatternLayout.createLayout(
    + "%-5p : %m%n", configuration, null, null, true, false, null, null);
    +
        private final OperationManager operationManager;
    + private StringOutputStreamManager manager;
        private boolean isVerbose;
    - private Layout verboseLayout;
    + private final Layout<? extends Serializable> layout;
    +
    + /**
    + * Instantiate a LogDivertAppender that redirects per-operation log output
    + * to the matching OperationLog via the given {@link StringOutputStreamManager}.
    + *
    + * @param name The name of the Appender.
    + * @param filter Filter
    + * @param manager The OutputStreamManager.
    + * @param operationManager Operation manager
    + */
    + protected LogDivertAppender(String name, Filter filter,
    + StringOutputStreamManager manager, OperationManager operationManager,
    + OperationLog.LoggingLevel loggingMode) {
    + super(name, null, filter, false, true, manager);
    + this.operationManager = operationManager;
    + this.manager = manager;
    + this.isVerbose = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
    + this.layout = getDefaultLayout();
    + }
    +
    + public Layout<? extends Serializable> getDefaultLayout() {
    + // There should be a ConsoleAppender. Copy its Layout.
    + Logger root = LogManager.getRootLogger();
    + Layout layout = null;
    +
    + for (Appender ap : ((org.apache.logging.log4j.core.Logger) root).getAppenders().values()) {
    + if (ap.getClass().equals(ConsoleAppender.class)) {
    + layout = ap.getLayout();
    + break;
    + }
    + }
    +
    + return layout;
    + }

        /**
         * A log filter that filters messages coming from the logger with the given names.
    @@ -52,31 +102,31 @@ public class LogDivertAppender extends WriterAppender {
         * they don't generate more logs for themselves when they process logs.
         * White list filter is used for less verbose log collection
         */
    - private static class NameFilter extends Filter {
    + private static class NameFilter extends AbstractFilter {
          private Pattern namePattern;
    - private LoggingLevel loggingMode;
    + private OperationLog.LoggingLevel loggingMode;
          private OperationManager operationManager;

          /* Patterns that are excluded in verbose logging level.
           * Filter out messages coming from log processing classes, or we'll run an infinite loop.
           */
          private static final Pattern verboseExcludeNamePattern = Pattern.compile(Joiner.on("|").
    - join(new String[] {LOG.getName(), OperationLog.class.getName(),
    - OperationManager.class.getName()}));
    + join(new String[]{LOG.getName(), OperationLog.class.getName(),
    + OperationManager.class.getName()}));

          /* Patterns that are included in execution logging level.
           * In execution mode, show only select logger messages.
           */
          private static final Pattern executionIncludeNamePattern = Pattern.compile(Joiner.on("|").
    - join(new String[] {"org.apache.hadoop.mapreduce.JobSubmitter",
    - "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
    - "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));
    + join(new String[]{"org.apache.hadoop.mapreduce.JobSubmitter",
    + "org.apache.hadoop.mapreduce.Job", "SessionState", Task.class.getName(),
    + "org.apache.hadoop.hive.ql.exec.spark.status.SparkJobMonitor"}));

          /* Patterns that are included in performance logging level.
           * In performance mode, show execution and performance logger messages.
           */
          private static final Pattern performanceIncludeNamePattern = Pattern.compile(
    - executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());
    + executionIncludeNamePattern.pattern() + "|" + PerfLogger.class.getName());

          private void setCurrentNamePattern(OperationLog.LoggingLevel mode) {
            if (mode == OperationLog.LoggingLevel.VERBOSE) {
    @@ -88,26 +138,25 @@ public class LogDivertAppender extends WriterAppender {
            }
          }

    - public NameFilter(
    - OperationLog.LoggingLevel loggingMode, OperationManager op) {
    + public NameFilter(OperationLog.LoggingLevel loggingMode, OperationManager op) {
            this.operationManager = op;
            this.loggingMode = loggingMode;
            setCurrentNamePattern(loggingMode);
          }

          @Override
    - public int decide(LoggingEvent ev) {
    + public Result filter(LogEvent event) {
            OperationLog log = operationManager.getOperationLogByThread();
            boolean excludeMatches = (loggingMode == OperationLog.LoggingLevel.VERBOSE);

            if (log == null) {
    - return Filter.DENY;
    + return Result.DENY;
            }

            OperationLog.LoggingLevel currentLoggingMode = log.getOpLoggingLevel();
            // If logging is disabled, deny everything.
            if (currentLoggingMode == OperationLog.LoggingLevel.NONE) {
    - return Filter.DENY;
    + return Result.DENY;
            }
            // Look at the current session's setting
            // and set the pattern and excludeMatches accordingly.
    @@ -116,88 +165,58 @@ public class LogDivertAppender extends WriterAppender {
              setCurrentNamePattern(loggingMode);
            }

    - boolean isMatch = namePattern.matcher(ev.getLoggerName()).matches();
    + boolean isMatch = namePattern.matcher(event.getLoggerName()).matches();

            if (excludeMatches == isMatch) {
              // Deny if this is black-list filter (excludeMatches = true) and it
    - // matched
    - // or if this is whitelist filter and it didn't match
    - return Filter.DENY;
    + // matched or if this is whitelist filter and it didn't match
    + return Result.DENY;
            }
    - return Filter.NEUTRAL;
    + return Result.NEUTRAL;
          }
        }

    - /** This is where the log message will go to */
    - private final CharArrayWriter writer = new CharArrayWriter();
    -
    - private void setLayout (boolean isVerbose, Layout lo) {
    - if (isVerbose) {
    - if (lo == null) {
    - lo = CLIServiceUtils.verboseLayout;
    - LOG.info("Cannot find a Layout from a ConsoleAppender. Using default Layout pattern.");
    - }
    - } else {
    - lo = CLIServiceUtils.nonVerboseLayout;
    - }
    - setLayout(lo);
    + public static LogDivertAppender createInstance(OperationManager operationManager,
    + OperationLog.LoggingLevel loggingMode) {
    + return new LogDivertAppender("LogDivertAppender", new NameFilter(loggingMode, operationManager),
    + new StringOutputStreamManager(new ByteArrayOutputStream(), "StringStream", null),
    + operationManager, loggingMode);
        }

    - private void initLayout(boolean isVerbose) {
    - // There should be a ConsoleAppender. Copy its Layout.
    - Logger root = Logger.getRootLogger();
    - Layout layout = null;
    -
    - Enumeration<?> appenders = root.getAllAppenders();
    - while (appenders.hasMoreElements()) {
    - Appender ap = (Appender) appenders.nextElement();
    - if (ap.getClass().equals(ConsoleAppender.class)) {
    - layout = ap.getLayout();
    - break;
    - }
    - }
    - setLayout(isVerbose, layout);
    + public String getOutput() {
    + return new String(manager.getStream().toByteArray());
        }

    - public LogDivertAppender(OperationManager operationManager,
    - OperationLog.LoggingLevel loggingMode) {
    - isVerbose = (loggingMode == OperationLog.LoggingLevel.VERBOSE);
    - initLayout(isVerbose);
    - setWriter(writer);
    - setName("LogDivertAppender");
    - this.operationManager = operationManager;
    - this.verboseLayout = isVerbose ? layout : CLIServiceUtils.verboseLayout;
    - addFilter(new NameFilter(loggingMode, operationManager));
    + @Override
    + public void start() {
    + super.start();
        }

        @Override
    - public void doAppend(LoggingEvent event) {
    - OperationLog log = operationManager.getOperationLogByThread();
    + public Layout<? extends Serializable> getLayout() {

    - // Set current layout depending on the verbose/non-verbose mode.
    + // If there is a logging level change from verbose->non-verbose or vice-versa since
    + // the last subAppend call, change the layout to preserve consistency.
    + OperationLog log = operationManager.getOperationLogByThread();
          if (log != null) {
    - boolean isCurrModeVerbose = (log.getOpLoggingLevel() == OperationLog.LoggingLevel.VERBOSE);
    + isVerbose = (log.getOpLoggingLevel() == OperationLog.LoggingLevel.VERBOSE);
    + }

    - // If there is a logging level change from verbose->non-verbose or vice-versa since
    - // the last subAppend call, change the layout to preserve consistency.
    - if (isCurrModeVerbose != isVerbose) {
    - isVerbose = isCurrModeVerbose;
    - setLayout(isVerbose, verboseLayout);
    - }
    + // layout is immutable in log4j2, so we cheat here and return a different layout when
    + // verbosity changes
    + if (isVerbose) {
    + return verboseLayout;
    + } else {
    + return layout == null ? nonVerboseLayout : layout;
          }
    - super.doAppend(event);
        }

    - /**
    - * Overrides WriterAppender.subAppend(), which does the real logging. No need
    - * to worry about concurrency since log4j calls this synchronously.
    - */
        @Override
    - protected void subAppend(LoggingEvent event) {
    - super.subAppend(event);
    - // That should've gone into our writer. Notify the LogContext.
    - String logOutput = writer.toString();
    - writer.reset();
    + public void append(LogEvent event) {
    + super.append(event);
    +
    + String logOutput = getOutput();
    + manager.reset();

          OperationLog log = operationManager.getOperationLogByThread();
          if (log == null) {
    @@ -206,4 +225,22 @@ public class LogDivertAppender extends WriterAppender {
          }
          log.writeOperationLog(logOutput);
        }
    +
    + protected static class StringOutputStreamManager extends OutputStreamManager {
    + ByteArrayOutputStream stream;
    +
    + protected StringOutputStreamManager(ByteArrayOutputStream os, String streamName,
    + Layout<?> layout) {
    + super(os, streamName, layout);
    + stream = os;
    + }
    +
    + public ByteArrayOutputStream getStream() {
    + return stream;
    + }
    +
    + public void reset() {
    + stream.reset();
    + }
    + }
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
    ----------------------------------------------------------------------
    diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
    index 9b0a519..304a525 100644
    --- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
    +++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java
    @@ -41,8 +41,11 @@ import org.apache.hive.service.cli.RowSet;
      import org.apache.hive.service.cli.RowSetFactory;
      import org.apache.hive.service.cli.TableSchema;
      import org.apache.hive.service.cli.session.HiveSession;
    -import org.apache.log4j.Appender;
    -import org.apache.log4j.Logger;
    +import org.apache.logging.log4j.LogManager;
    +import org.apache.logging.log4j.core.Appender;
    +import org.apache.logging.log4j.core.LoggerContext;
    +import org.apache.logging.log4j.core.config.Configuration;
    +import org.apache.logging.log4j.core.config.LoggerConfig;

      /**
       * OperationManager.
    @@ -50,7 +53,6 @@ import org.apache.log4j.Logger;
       */
      public class OperationManager extends AbstractService {
        private final Log LOG = LogFactory.getLog(OperationManager.class.getName());
    -
        private final Map<OperationHandle, Operation> handleToOperation =
            new HashMap<OperationHandle, Operation>();

    @@ -83,8 +85,13 @@ public class OperationManager extends AbstractService {

        private void initOperationLogCapture(String loggingMode) {
          // Register another Appender (with the same layout) that talks to us.
    - Appender ap = new LogDivertAppender(this, OperationLog.getLoggingLevel(loggingMode));
    - Logger.getRootLogger().addAppender(ap);
    + Appender ap = LogDivertAppender.createInstance(this, OperationLog.getLoggingLevel(loggingMode));
    + LoggerContext context = (LoggerContext) LogManager.getContext(false);
    + Configuration configuration = context.getConfiguration();
    + LoggerConfig loggerConfig = configuration.getLoggerConfig(LogManager.getLogger().getName());
    + loggerConfig.addAppender(ap, null, null);
    + context.updateLoggers();
    + ap.start();
        }

        public ExecuteStatementOperation newExecuteStatementOperation(HiveSession parentSession,

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/shims/common/pom.xml
    ----------------------------------------------------------------------
    diff --git a/shims/common/pom.xml b/shims/common/pom.xml
    index 9e9a3b7..dfdec2b 100644
    --- a/shims/common/pom.xml
    +++ b/shims/common/pom.xml
    @@ -41,14 +41,19 @@
            <version>${commons-logging.version}</version>
          </dependency>
          <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>log4j</artifactId>
    - <version>${log4j.version}</version>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-1.2-api</artifactId>
    + <version>${log4j2.version}</version>
          </dependency>
          <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>apache-log4j-extras</artifactId>
    - <version>${log4j-extras.version}</version>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-slf4j-impl</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-jcl</artifactId>
    + <version>${log4j2.version}</version>
          </dependency>
          <dependency>
            <groupId>com.google.guava</groupId>
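
    The log4j-1.2-api artifact added above bridges the legacy Log4j 1.x API onto log4j2, so
    existing callers of org.apache.log4j.Logger keep working without source changes; log4j-slf4j-impl
    and log4j-jcl do the same for SLF4J and commons-logging. A minimal, hedged sketch (the BridgeDemo
    class is illustrative and not part of this patch) assuming those bridge jars are on the classpath:

        import org.apache.log4j.Logger;  // legacy 1.x API, served by log4j-1.2-api at runtime

        public class BridgeDemo {
          private static final Logger LOG = Logger.getLogger(BridgeDemo.class);

          public static void main(String[] args) {
            // This call is routed to the log4j2 core and whatever log4j2 configuration
            // is active, rather than to a Log4j 1.x LogManager.
            LOG.info("legacy API call handled by log4j2");
          }
        }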

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java
    ----------------------------------------------------------------------
    diff --git a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java b/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java
    deleted file mode 100644
    index 224b135..0000000
    --- a/shims/common/src/main/java/org/apache/hadoop/hive/shims/HiveEventCounter.java
    +++ /dev/null
    @@ -1,102 +0,0 @@
    -/**
    - * Licensed to the Apache Software Foundation (ASF) under one
    - * or more contributor license agreements. See the NOTICE file
    - * distributed with this work for additional information
    - * regarding copyright ownership. The ASF licenses this file
    - * to you under the Apache License, Version 2.0 (the
    - * "License"); you may not use this file except in compliance
    - * with the License. You may obtain a copy of the License at
    - *
    - * http://www.apache.org/licenses/LICENSE-2.0
    - *
    - * Unless required by applicable law or agreed to in writing, software
    - * distributed under the License is distributed on an "AS IS" BASIS,
    - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    - * See the License for the specific language governing permissions and
    - * limitations under the License.
    - */
    -
    -package org.apache.hadoop.hive.shims;
    -
    -import org.apache.log4j.Appender;
    -import org.apache.log4j.AppenderSkeleton;
    -import org.apache.log4j.Layout;
    -import org.apache.log4j.spi.ErrorHandler;
    -import org.apache.log4j.spi.Filter;
    -import org.apache.log4j.spi.LoggingEvent;
    -import org.apache.log4j.spi.OptionHandler;
    -
    -public class HiveEventCounter implements Appender, OptionHandler {
    -
    - AppenderSkeleton hadoopEventCounter;
    -
    - public HiveEventCounter() {
    - hadoopEventCounter = ShimLoader.getEventCounter();
    - }
    -
    - @Override
    - public void close() {
    - hadoopEventCounter.close();
    - }
    -
    - @Override
    - public boolean requiresLayout() {
    - return hadoopEventCounter.requiresLayout();
    - }
    -
    - @Override
    - public void addFilter(Filter filter) {
    - hadoopEventCounter.addFilter(filter);
    - }
    -
    - @Override
    - public void clearFilters() {
    - hadoopEventCounter.clearFilters();
    - }
    -
    - @Override
    - public void doAppend(LoggingEvent event) {
    - hadoopEventCounter.doAppend(event);
    - }
    -
    - @Override
    - public ErrorHandler getErrorHandler() {
    - return hadoopEventCounter.getErrorHandler();
    - }
    -
    - @Override
    - public Filter getFilter() {
    - return hadoopEventCounter.getFilter();
    - }
    -
    - @Override
    - public Layout getLayout() {
    - return hadoopEventCounter.getLayout();
    - }
    -
    - @Override
    - public String getName() {
    - return hadoopEventCounter.getName();
    - }
    -
    - @Override
    - public void setErrorHandler(ErrorHandler handler) {
    - hadoopEventCounter.setErrorHandler(handler);
    - }
    -
    - @Override
    - public void setLayout(Layout layout) {
    - hadoopEventCounter.setLayout(layout);
    - }
    -
    - @Override
    - public void setName(String name) {
    - hadoopEventCounter.setName(name);
    - }
    -
    - @Override
    - public void activateOptions() {
    - hadoopEventCounter.activateOptions();
    - }
    -
    -}

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/spark-client/src/test/resources/log4j.properties
    ----------------------------------------------------------------------
    diff --git a/spark-client/src/test/resources/log4j.properties b/spark-client/src/test/resources/log4j.properties
    deleted file mode 100644
    index 93a60cc..0000000
    --- a/spark-client/src/test/resources/log4j.properties
    +++ /dev/null
    @@ -1,23 +0,0 @@
    -#
    -# Licensed to the Apache Software Foundation (ASF) under one or more
    -# contributor license agreements. See the NOTICE file distributed with
    -# this work for additional information regarding copyright ownership.
    -# The ASF licenses this file to You under the Apache License, Version 2.0
    -# (the "License"); you may not use this file except in compliance with
    -# the License. You may obtain a copy of the License at
    -#
    -# http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -#
    -
    -# Set everything to be logged to the file target/unit-tests.log
    -log4j.rootCategory=DEBUG, console
    -log4j.appender.console=org.apache.log4j.ConsoleAppender
    -log4j.appender.console.target=System.err
    -log4j.appender.console.layout=org.apache.log4j.PatternLayout
    -log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/spark-client/src/test/resources/log4j2.xml
    ----------------------------------------------------------------------
    diff --git a/spark-client/src/test/resources/log4j2.xml b/spark-client/src/test/resources/log4j2.xml
    new file mode 100644
    index 0000000..a435069
    --- /dev/null
    +++ b/spark-client/src/test/resources/log4j2.xml
    @@ -0,0 +1,39 @@
    +<?xml version="1.0" encoding="UTF-8"?>
    +<!--
    + Licensed to the Apache Software Foundation (ASF) under one or more
    + contributor license agreements. See the NOTICE file distributed with
    + this work for additional information regarding copyright ownership.
    + The ASF licenses this file to You under the Apache License, Version 2.0
    + (the "License"); you may not use this file except in compliance with
    + the License. You may obtain a copy of the License at
    +
    + http://www.apache.org/licenses/LICENSE-2.0
    +
    + Unless required by applicable law or agreed to in writing, software
    + distributed under the License is distributed on an "AS IS" BASIS,
    + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + See the License for the specific language governing permissions and
    + limitations under the License.
    +-->
    +
    +<Configuration status="info" strict="true" name="SparkClientLog4j2"
    + packages="org.apache.hadoop.hive.ql.log">
    +
    + <Properties>
    + <Property name="spark.log.level">DEBUG</Property>
    + <Property name="spark.root.logger">console</Property>
    + </Properties>
    +
    + <Appenders>
    + <Console name="console" target="SYSTEM_ERR">
    + <PatternLayout pattern="%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n"/>
    + </Console>
    + </Appenders>
    +
    + <Loggers>
    + <Root level="DEBUG">
    + <AppenderRef ref="${sys:spark.root.logger}" level="${sys:spark.log.level}"/>
    + </Root>
    + </Loggers>
    +
    +</Configuration>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/storage-api/pom.xml
    ----------------------------------------------------------------------
    diff --git a/storage-api/pom.xml b/storage-api/pom.xml
    index 71b51b8..71b79f1 100644
    --- a/storage-api/pom.xml
    +++ b/storage-api/pom.xml
    @@ -32,13 +32,6 @@
        </properties>

        <dependencies>
    - <!-- dependencies are always listed in sorted order by groupId, artifectId -->
    - <!-- inter-project -->
    - <dependency>
    - <groupId>log4j</groupId>
    - <artifactId>log4j</artifactId>
    - <version>${log4j.version}</version>
    - </dependency>
          <!-- test inter-project -->
          <dependency>
            <groupId>junit</groupId>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/testutils/ptest2/pom.xml
    ----------------------------------------------------------------------
    diff --git a/testutils/ptest2/pom.xml b/testutils/ptest2/pom.xml
    index 211678e..2cf7f45 100644
    --- a/testutils/ptest2/pom.xml
    +++ b/testutils/ptest2/pom.xml
    @@ -64,6 +64,26 @@ limitations under the License.
            <version>15.0</version>
          </dependency>
          <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-1.2-api</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-web</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-slf4j-impl</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
    + <groupId>org.apache.logging.log4j</groupId>
    + <artifactId>log4j-jcl</artifactId>
    + <version>${log4j2.version}</version>
    + </dependency>
    + <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.17</version>

    http://git-wip-us.apache.org/repos/asf/hive/blob/c93d6c77/testutils/ptest2/src/main/resources/log4j.properties
    ----------------------------------------------------------------------
    diff --git a/testutils/ptest2/src/main/resources/log4j.properties b/testutils/ptest2/src/main/resources/log4j.properties
    deleted file mode 100644
    index edb9696..0000000
    --- a/testutils/ptest2/src/main/resources/log4j.properties
    +++ /dev/null
    @@ -1,37 +0,0 @@
    -# Licensed to the Apache Software Foundation (ASF) under one or more
    -# contributor license agreements. See the NOTICE file distributed with
    -# this work for additional information regarding copyright ownership.
    -# The ASF licenses this file to You under the Apache License, Version 2.0
    -# (the "License"); you may not use this file except in compliance with
    -# the License. You may obtain a copy of the License at
    -#
    -# http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS,
    -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    -# See the License for the specific language governing permissions and
    -# limitations under the License.
    -
    -hive.ptest.logdir=target
    -
    -log4j.rootLogger=DEBUG,FILE
    -log4j.threshhold=ALL
    -
    -log4j.appender.FILE=org.apache.log4j.RollingFileAppender
    -log4j.appender.FILE.File=${hive.ptest.logdir}/ptest.log
    -log4j.appender.FILE.MaxFileSize=50MB
    -log4j.appender.FILE.MaxBackupIndex=1
    -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout
    -log4j.appender.FILE.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1}:%L - %m%n
    -
    -log4j.logger.org.apache.http=INFO
    -log4j.logger.org.springframework=INFO
    -log4j.logger.org.jclouds=INFO
    -log4j.logger.jclouds=INFO
    -log4j.logger.org.apache.hive=DEBUG
    -log4j.logger.org.apache.http=TRACE
    -
    -# Silence useless ZK logs
    -log4j.logger.org.apache.zookeeper.server.NIOServerCnxn=WARN
    -log4j.logger.org.apache.zookeeper.ClientCnxnSocketNIO=WARN
  • Sershe at Aug 17, 2015 at 10:00 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
    index 305e979,1292a64..e8cb821
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class NoSuchObjectException extends TException implements org.apache.thrift.TBase<NoSuchObjectException, NoSuchObjectException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class NoSuchObjectException extends TException implements org.apache.thrift.TBase<NoSuchObjectException, NoSuchObjectException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchObjectException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchObjectException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
    index 92dbb7f,d1c430d..9997b93
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class NoSuchTxnException extends TException implements org.apache.thrift.TBase<NoSuchTxnException, NoSuchTxnException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class NoSuchTxnException extends TException implements org.apache.thrift.TBase<NoSuchTxnException, NoSuchTxnException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchTxnException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchTxnException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
    index f196c1c,bcf4f51..6f594c5
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class NotificationEvent implements org.apache.thrift.TBase<NotificationEvent, NotificationEvent._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class NotificationEvent implements org.apache.thrift.TBase<NotificationEvent, NotificationEvent._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEvent> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEvent");

         private static final org.apache.thrift.protocol.TField EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("eventId", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
    index 6a8c8ab,c2bc4e8..0c6dc01
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class NotificationEventRequest implements org.apache.thrift.TBase<NotificationEventRequest, NotificationEventRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class NotificationEventRequest implements org.apache.thrift.TBase<NotificationEventRequest, NotificationEventRequest._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventRequest");

         private static final org.apache.thrift.protocol.TField LAST_EVENT_FIELD_DESC = new org.apache.thrift.protocol.TField("lastEvent", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
    index 0123e87,24f9ce4..3295c3c
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class NotificationEventResponse implements org.apache.thrift.TBase<NotificationEventResponse, NotificationEventResponse._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class NotificationEventResponse implements org.apache.thrift.TBase<NotificationEventResponse, NotificationEventResponse._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventResponse> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventResponse");

         private static final org.apache.thrift.protocol.TField EVENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("events", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
    index 0d05378,c5f9ccf..a09575d
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class OpenTxnRequest implements org.apache.thrift.TBase<OpenTxnRequest, OpenTxnRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class OpenTxnRequest implements org.apache.thrift.TBase<OpenTxnRequest, OpenTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<OpenTxnRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnRequest");

         private static final org.apache.thrift.protocol.TField NUM_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_txns", org.apache.thrift.protocol.TType.I32, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
    index f5efbe3,c233422..d874654
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class OpenTxnsResponse implements org.apache.thrift.TBase<OpenTxnsResponse, OpenTxnsResponse._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class OpenTxnsResponse implements org.apache.thrift.TBase<OpenTxnsResponse, OpenTxnsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<OpenTxnsResponse> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnsResponse");

         private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_ids", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
    index 55d5174,da7bd55..d83b83d
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, java.io.Serializable, Cloneable, Comparable<Order> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Order");

         private static final org.apache.thrift.protocol.TField COL_FIELD_DESC = new org.apache.thrift.protocol.TField("col", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
    index 7d29d09,224d28e..e38798a
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Partition implements org.apache.thrift.TBase<Partition, Partition._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Partition implements org.apache.thrift.TBase<Partition, Partition._Fields>, java.io.Serializable, Cloneable, Comparable<Partition> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Partition");

         private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
    index bcd92b8,c50a100..6ccebb3
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionListComposingSpec implements org.apache.thrift.TBase<PartitionListComposingSpec, PartitionListComposingSpec._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionListComposingSpec implements org.apache.thrift.TBase<PartitionListComposingSpec, PartitionListComposingSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionListComposingSpec> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionListComposingSpec");

         private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
    index 06cc8fb,b5251af..8d2f1b4
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionSpec implements org.apache.thrift.TBase<PartitionSpec, PartitionSpec._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionSpec implements org.apache.thrift.TBase<PartitionSpec, PartitionSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpec> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpec");

         private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
    index 18ab134,5574e0b..08d8548
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionSpecWithSharedSD implements org.apache.thrift.TBase<PartitionSpecWithSharedSD, PartitionSpecWithSharedSD._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionSpecWithSharedSD implements org.apache.thrift.TBase<PartitionSpecWithSharedSD, PartitionSpecWithSharedSD._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpecWithSharedSD> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpecWithSharedSD");

         private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
    index 193d9e9,e1ec73e..57ff72e
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionWithoutSD implements org.apache.thrift.TBase<PartitionWithoutSD, PartitionWithoutSD._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionWithoutSD implements org.apache.thrift.TBase<PartitionWithoutSD, PartitionWithoutSD._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionWithoutSD> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionWithoutSD");

         private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
    index c6fa8a2,6149c31..b10f3c8
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionsByExprRequest implements org.apache.thrift.TBase<PartitionsByExprRequest, PartitionsByExprRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionsByExprRequest implements org.apache.thrift.TBase<PartitionsByExprRequest, PartitionsByExprRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsByExprRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprRequest");

         private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
    index 525ce0e,740f7bd..3a0376d
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionsByExprResult implements org.apache.thrift.TBase<PartitionsByExprResult, PartitionsByExprResult._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionsByExprResult implements org.apache.thrift.TBase<PartitionsByExprResult, PartitionsByExprResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsByExprResult> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprResult");

         private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
    index d224453,5d1ee87..bfa77f6
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionsStatsRequest implements org.apache.thrift.TBase<PartitionsStatsRequest, PartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionsStatsRequest implements org.apache.thrift.TBase<PartitionsStatsRequest, PartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsRequest");

         private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
    index c9ae14e,da33014..757f209
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PartitionsStatsResult implements org.apache.thrift.TBase<PartitionsStatsResult, PartitionsStatsResult._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PartitionsStatsResult implements org.apache.thrift.TBase<PartitionsStatsResult, PartitionsStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsResult> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult");

         private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
    index 0c9518a,8f29f50..889a41c
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PrincipalPrivilegeSet implements org.apache.thrift.TBase<PrincipalPrivilegeSet, PrincipalPrivilegeSet._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PrincipalPrivilegeSet implements org.apache.thrift.TBase<PrincipalPrivilegeSet, PrincipalPrivilegeSet._Fields>, java.io.Serializable, Cloneable, Comparable<PrincipalPrivilegeSet> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrincipalPrivilegeSet");

         private static final org.apache.thrift.protocol.TField USER_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("userPrivileges", org.apache.thrift.protocol.TType.MAP, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
    index 4285ed8,2fd819c..741ace7
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PrivilegeBag implements org.apache.thrift.TBase<PrivilegeBag, PrivilegeBag._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PrivilegeBag implements org.apache.thrift.TBase<PrivilegeBag, PrivilegeBag._Fields>, java.io.Serializable, Cloneable, Comparable<PrivilegeBag> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeBag");

         private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
    index 5869457,c04e196..ba52582
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class PrivilegeGrantInfo implements org.apache.thrift.TBase<PrivilegeGrantInfo, PrivilegeGrantInfo._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class PrivilegeGrantInfo implements org.apache.thrift.TBase<PrivilegeGrantInfo, PrivilegeGrantInfo._Fields>, java.io.Serializable, Cloneable, Comparable<PrivilegeGrantInfo> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeGrantInfo");

         private static final org.apache.thrift.protocol.TField PRIVILEGE_FIELD_DESC = new org.apache.thrift.protocol.TField("privilege", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
    index c230eab,3b3df25..cffcf91
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ResourceUri implements org.apache.thrift.TBase<ResourceUri, ResourceUri._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ResourceUri implements org.apache.thrift.TBase<ResourceUri, ResourceUri._Fields>, java.io.Serializable, Cloneable, Comparable<ResourceUri> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ResourceUri");

         private static final org.apache.thrift.protocol.TField RESOURCE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceType", org.apache.thrift.protocol.TType.I32, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
    index 35fcf58,5c882d2..b9052a3
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Role implements org.apache.thrift.TBase<Role, Role._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Role implements org.apache.thrift.TBase<Role, Role._Fields>, java.io.Serializable, Cloneable, Comparable<Role> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Role");

         private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
    index 8993268,c4beb08..6e3c200
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class RolePrincipalGrant implements org.apache.thrift.TBase<RolePrincipalGrant, RolePrincipalGrant._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class RolePrincipalGrant implements org.apache.thrift.TBase<RolePrincipalGrant, RolePrincipalGrant._Fields>, java.io.Serializable, Cloneable, Comparable<RolePrincipalGrant> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RolePrincipalGrant");

         private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
    index 31ee943,8772180..85af5c5
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Schema implements org.apache.thrift.TBase<Schema, Schema._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Schema implements org.apache.thrift.TBase<Schema, Schema._Fields>, java.io.Serializable, Cloneable, Comparable<Schema> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Schema");

         private static final org.apache.thrift.protocol.TField FIELD_SCHEMAS_FIELD_DESC = new org.apache.thrift.protocol.TField("fieldSchemas", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
    index 24d65bb,b30e698..73853a2
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class SerDeInfo implements org.apache.thrift.TBase<SerDeInfo, SerDeInfo._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class SerDeInfo implements org.apache.thrift.TBase<SerDeInfo, SerDeInfo._Fields>, java.io.Serializable, Cloneable, Comparable<SerDeInfo> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SerDeInfo");

         private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
    index e62e410,7da298c..d09e413
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class SetPartitionsStatsRequest implements org.apache.thrift.TBase<SetPartitionsStatsRequest, SetPartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class SetPartitionsStatsRequest implements org.apache.thrift.TBase<SetPartitionsStatsRequest, SetPartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<SetPartitionsStatsRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetPartitionsStatsRequest");

         private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
    index 28ad1c9,7756384..7d4d12c
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ShowCompactRequest implements org.apache.thrift.TBase<ShowCompactRequest, ShowCompactRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ShowCompactRequest implements org.apache.thrift.TBase<ShowCompactRequest, ShowCompactRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ShowCompactRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactRequest");



    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
    index c882b7b,dd1e857..7112f26
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ShowCompactResponse implements org.apache.thrift.TBase<ShowCompactResponse, ShowCompactResponse._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ShowCompactResponse implements org.apache.thrift.TBase<ShowCompactResponse, ShowCompactResponse._Fields>, java.io.Serializable, Cloneable, Comparable<ShowCompactResponse> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactResponse");

         private static final org.apache.thrift.protocol.TField COMPACTS_FIELD_DESC = new org.apache.thrift.protocol.TField("compacts", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
    index 365a401,cd7e79e..810b140
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponseElement.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ShowCompactResponseElement implements org.apache.thrift.TBase<ShowCompactResponseElement, ShowCompactResponseElement._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ShowCompactResponseElement implements org.apache.thrift.TBase<ShowCompactResponseElement, ShowCompactResponseElement._Fields>, java.io.Serializable, Cloneable, Comparable<ShowCompactResponseElement> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowCompactResponseElement");

         private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
    index 7a1fd6f,122c070..c13fda4
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ShowLocksRequest implements org.apache.thrift.TBase<ShowLocksRequest, ShowLocksRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ShowLocksRequest implements org.apache.thrift.TBase<ShowLocksRequest, ShowLocksRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ShowLocksRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksRequest");



    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
    index 42cfe8c,52b0bbc..2289195
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ShowLocksResponse implements org.apache.thrift.TBase<ShowLocksResponse, ShowLocksResponse._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ShowLocksResponse implements org.apache.thrift.TBase<ShowLocksResponse, ShowLocksResponse._Fields>, java.io.Serializable, Cloneable, Comparable<ShowLocksResponse> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksResponse");

         private static final org.apache.thrift.protocol.TField LOCKS_FIELD_DESC = new org.apache.thrift.protocol.TField("locks", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
    index 2f7c24f,8be9b05..ba17ea7
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponseElement.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ShowLocksResponseElement implements org.apache.thrift.TBase<ShowLocksResponseElement, ShowLocksResponseElement._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ShowLocksResponseElement implements org.apache.thrift.TBase<ShowLocksResponseElement, ShowLocksResponseElement._Fields>, java.io.Serializable, Cloneable, Comparable<ShowLocksResponseElement> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ShowLocksResponseElement");

         private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    index ab5c0ed,bc64495..5cb5e2b
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class SkewedInfo implements org.apache.thrift.TBase<SkewedInfo, SkewedInfo._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class SkewedInfo implements org.apache.thrift.TBase<SkewedInfo, SkewedInfo._Fields>, java.io.Serializable, Cloneable, Comparable<SkewedInfo> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SkewedInfo");

         private static final org.apache.thrift.protocol.TField SKEWED_COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("skewedColNames", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
    index 813b4f0,165a879..6b5cf87
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class StorageDescriptor implements org.apache.thrift.TBase<StorageDescriptor, StorageDescriptor._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class StorageDescriptor implements org.apache.thrift.TBase<StorageDescriptor, StorageDescriptor._Fields>, java.io.Serializable, Cloneable, Comparable<StorageDescriptor> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StorageDescriptor");

         private static final org.apache.thrift.protocol.TField COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("cols", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
    index db3274a,9906ff3..2160bc8
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StringColumnStatsData.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class StringColumnStatsData implements org.apache.thrift.TBase<StringColumnStatsData, StringColumnStatsData._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class StringColumnStatsData implements org.apache.thrift.TBase<StringColumnStatsData, StringColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<StringColumnStatsData> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("StringColumnStatsData");

         private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("maxColLen", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
    index 484bd6a,51b9e38..ca16924
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, java.io.Serializable, Cloneable, Comparable<Table> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Table");

         private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
    index 2073829,1edcaf9..11d3b03
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class TableStatsRequest implements org.apache.thrift.TBase<TableStatsRequest, TableStatsRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class TableStatsRequest implements org.apache.thrift.TBase<TableStatsRequest, TableStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<TableStatsRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsRequest");

         private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
    index 541370f,25a1f25..f1104e1
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class TableStatsResult implements org.apache.thrift.TBase<TableStatsResult, TableStatsResult._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class TableStatsResult implements org.apache.thrift.TBase<TableStatsResult, TableStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<TableStatsResult> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult");

         private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1);
  • Sershe at Aug 17, 2015 at 10:00 pm
    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
    index 939c15a,e9088e0..c6ad69e
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CompactionRequest");

         private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
    index bdcf21d,000670a..99e7a83
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class ConfigValSecurityException extends TException implements org.apache.thrift.TBase<ConfigValSecurityException, ConfigValSecurityException._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class ConfigValSecurityException extends TException implements org.apache.thrift.TBase<ConfigValSecurityException, ConfigValSecurityException._Fields>, java.io.Serializable, Cloneable, Comparable<ConfigValSecurityException> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ConfigValSecurityException");

         private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
    index 8b8e5c4,d55d874..89abc78
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class CurrentNotificationEventId implements org.apache.thrift.TBase<CurrentNotificationEventId, CurrentNotificationEventId._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class CurrentNotificationEventId implements org.apache.thrift.TBase<CurrentNotificationEventId, CurrentNotificationEventId._Fields>, java.io.Serializable, Cloneable, Comparable<CurrentNotificationEventId> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CurrentNotificationEventId");

         private static final org.apache.thrift.protocol.TField EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("eventId", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
    index c3c531d,56b7281..759eec9
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Database implements org.apache.thrift.TBase<Database, Database._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Database implements org.apache.thrift.TBase<Database, Database._Fields>, java.io.Serializable, Cloneable, Comparable<Database> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Database");

         private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
    index 3e02db5,0b406d6..b4a44a4
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Date implements org.apache.thrift.TBase<Date, Date._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Date implements org.apache.thrift.TBase<Date, Date._Fields>, java.io.Serializable, Cloneable, Comparable<Date> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Date");

         private static final org.apache.thrift.protocol.TField DAYS_SINCE_EPOCH_FIELD_DESC = new org.apache.thrift.protocol.TField("daysSinceEpoch", org.apache.thrift.protocol.TType.I64, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
    index e9a577d,7a3d4ed..7050334
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class DateColumnStatsData implements org.apache.thrift.TBase<DateColumnStatsData, DateColumnStatsData._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class DateColumnStatsData implements org.apache.thrift.TBase<DateColumnStatsData, DateColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DateColumnStatsData> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DateColumnStatsData");

         private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
    index ed8bb18,9215ce9..1f82543
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Decimal implements org.apache.thrift.TBase<Decimal, Decimal._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Decimal implements org.apache.thrift.TBase<Decimal, Decimal._Fields>, java.io.Serializable, Cloneable, Comparable<Decimal> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Decimal");

         private static final org.apache.thrift.protocol.TField UNSCALED_FIELD_DESC = new org.apache.thrift.protocol.TField("unscaled", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
    index 951d479,e64ca36..02092dc
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class DecimalColumnStatsData implements org.apache.thrift.TBase<DecimalColumnStatsData, DecimalColumnStatsData._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class DecimalColumnStatsData implements org.apache.thrift.TBase<DecimalColumnStatsData, DecimalColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DecimalColumnStatsData> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DecimalColumnStatsData");

         private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
    index 4203fd8,2509ed5..52288e5
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class DoubleColumnStatsData implements org.apache.thrift.TBase<DoubleColumnStatsData, DoubleColumnStatsData._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class DoubleColumnStatsData implements org.apache.thrift.TBase<DoubleColumnStatsData, DoubleColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DoubleColumnStatsData> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DoubleColumnStatsData");

         private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.DOUBLE, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
    index 59a66f3,5e3a2d1..0d1e50d
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class DropPartitionsExpr implements org.apache.thrift.TBase<DropPartitionsExpr, DropPartitionsExpr._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class DropPartitionsExpr implements org.apache.thrift.TBase<DropPartitionsExpr, DropPartitionsExpr._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsExpr> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsExpr");

         private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
    index 1923f38,24536ba..46cc9a7
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class DropPartitionsRequest implements org.apache.thrift.TBase<DropPartitionsRequest, DropPartitionsRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class DropPartitionsRequest implements org.apache.thrift.TBase<DropPartitionsRequest, DropPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsRequest");

         private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
    index b7f69f2,c139e65..09da136
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class DropPartitionsResult implements org.apache.thrift.TBase<DropPartitionsResult, DropPartitionsResult._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class DropPartitionsResult implements org.apache.thrift.TBase<DropPartitionsResult, DropPartitionsResult._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsResult> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsResult");

         private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
    index ab0b399,6accb8d..3eabc86
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class EnvironmentContext implements org.apache.thrift.TBase<EnvironmentContext, EnvironmentContext._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class EnvironmentContext implements org.apache.thrift.TBase<EnvironmentContext, EnvironmentContext._Fields>, java.io.Serializable, Cloneable, Comparable<EnvironmentContext> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("EnvironmentContext");

         private static final org.apache.thrift.protocol.TField PROPERTIES_FIELD_DESC = new org.apache.thrift.protocol.TField("properties", org.apache.thrift.protocol.TType.MAP, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
    index a993810,ba69622..e73edd4
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class FieldSchema implements org.apache.thrift.TBase<FieldSchema, FieldSchema._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class FieldSchema implements org.apache.thrift.TBase<FieldSchema, FieldSchema._Fields>, java.io.Serializable, Cloneable, Comparable<FieldSchema> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FieldSchema");

         private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
    index 44b83da,f3d439c..25f9d54
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class FireEventRequest implements org.apache.thrift.TBase<FireEventRequest, FireEventRequest._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class FireEventRequest implements org.apache.thrift.TBase<FireEventRequest, FireEventRequest._Fields>, java.io.Serializable, Cloneable, Comparable<FireEventRequest> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventRequest");

         private static final org.apache.thrift.protocol.TField SUCCESSFUL_FIELD_DESC = new org.apache.thrift.protocol.TField("successful", org.apache.thrift.protocol.TType.BOOL, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
    index 051f411,d95ae06..6f277aa
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class FireEventResponse implements org.apache.thrift.TBase<FireEventResponse, FireEventResponse._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class FireEventResponse implements org.apache.thrift.TBase<FireEventResponse, FireEventResponse._Fields>, java.io.Serializable, Cloneable, Comparable<FireEventResponse> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventResponse");
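
The same annotation and Comparable change is applied uniformly, even to response structs that appear to carry no data fields of their own, such as FireEventResponse above. A small hypothetical check (not from the commit) of what the regenerated bean still provides in that case:

    // Sketch only: a field-less Thrift struct is still a full TBase bean.
    import org.apache.hadoop.hive.metastore.api.FireEventResponse;

    public class FireEventResponseSketch {
      public static void main(String[] args) {
        FireEventResponse first = new FireEventResponse();
        FireEventResponse second = first.deepCopy(); // deepCopy() is generated for every struct

        // With no fields to compare, the generated compareTo returns 0
        // and equals() reports the two copies as identical.
        System.out.println(first.compareTo(second)); // 0
        System.out.println(first.equals(second));    // true
      }
    }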



    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
    index c40e33d,50eff73..33c617e
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
    @@@ -31,7 -33,9 +33,9 @@@ import javax.annotation.Generated
       import org.slf4j.Logger;
       import org.slf4j.LoggerFactory;

    - public class Function implements org.apache.thrift.TBase<Function, Function._Fields>, java.io.Serializable, Cloneable {
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class Function implements org.apache.thrift.TBase<Function, Function._Fields>, java.io.Serializable, Cloneable, Comparable<Function> {
         private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Function");

         private static final org.apache.thrift.protocol.TField FUNCTION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("functionName", org.apache.thrift.protocol.TType.STRING, (short)1);

    http://git-wip-us.apache.org/repos/asf/hive/blob/c528294b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
    ----------------------------------------------------------------------
    diff --cc metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
    index 0000000,0a9e27b..170d8e7
    mode 000000,100644..100644
    --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
    +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
    @@@ -1,0 -1,447 +1,447 @@@
    + /**
    + * Autogenerated by Thrift Compiler (0.9.2)
    + *
    + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
    + * @generated
    + */
    + package org.apache.hadoop.hive.metastore.api;
    +
    + import org.apache.thrift.scheme.IScheme;
    + import org.apache.thrift.scheme.SchemeFactory;
    + import org.apache.thrift.scheme.StandardScheme;
    +
    + import org.apache.thrift.scheme.TupleScheme;
    + import org.apache.thrift.protocol.TTupleProtocol;
    + import org.apache.thrift.protocol.TProtocolException;
    + import org.apache.thrift.EncodingUtils;
    + import org.apache.thrift.TException;
    + import org.apache.thrift.async.AsyncMethodCallback;
    + import org.apache.thrift.server.AbstractNonblockingServer.*;
    + import java.util.List;
    + import java.util.ArrayList;
    + import java.util.Map;
    + import java.util.HashMap;
    + import java.util.EnumMap;
    + import java.util.Set;
    + import java.util.HashSet;
    + import java.util.EnumSet;
    + import java.util.Collections;
    + import java.util.BitSet;
    + import java.nio.ByteBuffer;
    + import java.util.Arrays;
    + import javax.annotation.Generated;
    + import org.slf4j.Logger;
    + import org.slf4j.LoggerFactory;
    +
    + @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
      -@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-3")
    ++@Generated(value = "Autogenerated by Thrift Compiler (0.9.2)", date = "2015-8-14")
    + public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFunctionsResponse, GetAllFunctionsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetAllFunctionsResponse> {
    + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetAllFunctionsResponse");
    +
    + private static final org.apache.thrift.protocol.TField FUNCTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("functions", org.apache.thrift.protocol.TType.LIST, (short)1);
    +
    + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
    + static {
    + schemes.put(StandardScheme.class, new GetAllFunctionsResponseStandardSchemeFactory());
    + schemes.put(TupleScheme.class, new GetAllFunctionsResponseTupleSchemeFactory());
    + }
    +
    + private List<Function> functions; // optional
    +
    + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
    + public enum _Fields implements org.apache.thrift.TFieldIdEnum {
    + FUNCTIONS((short)1, "functions");
    +
    + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
    +
    + static {
    + for (_Fields field : EnumSet.allOf(_Fields.class)) {
    + byName.put(field.getFieldName(), field);
    + }
    + }
    +
    + /**
    + * Find the _Fields constant that matches fieldId, or null if its not found.
    + */
    + public static _Fields findByThriftId(int fieldId) {
    + switch(fieldId) {
    + case 1: // FUNCTIONS
    + return FUNCTIONS;
    + default:
    + return null;
    + }
    + }
    +
    + /**
    + * Find the _Fields constant that matches field