Repository: hive
Updated Branches:
   refs/heads/master 4baf475ff -> dca4233da


http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
new file mode 100644
index 0000000..bbe4296
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_1.q.out
@@ -0,0 +1,44 @@
+PREHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Stage-1
+ Reducer 2
+ File Output Operator [FS_10]
+ Select Operator [SEL_9] (rows=275 width=10)
+ Output:["_col0"]
+ Merge Join Operator [MERGEJOIN_15] (rows=275 width=10)
+ Conds:RS_6.'5'=RS_7.'5'(Inner)
+ <-Map 1 [SIMPLE_EDGE]
+ SHUFFLE [RS_6]
+ PartitionCols:'5'
+ Select Operator [SEL_2] (rows=250 width=10)
+ Filter Operator [FIL_13] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_0] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
+ <-Map 3 [SIMPLE_EDGE]
+ SHUFFLE [RS_7]
+ PartitionCols:'5'
+ Select Operator [SEL_5] (rows=250 width=10)
+ Filter Operator [FIL_14] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_3] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
+

http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
new file mode 100644
index 0000000..bbe4296
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_2.q.out
@@ -0,0 +1,44 @@
+PREHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
+
+Stage-0
+ Fetch Operator
+ limit:-1
+ Stage-1
+ Reducer 2
+ File Output Operator [FS_10]
+ Select Operator [SEL_9] (rows=275 width=10)
+ Output:["_col0"]
+ Merge Join Operator [MERGEJOIN_15] (rows=275 width=10)
+ Conds:RS_6.'5'=RS_7.'5'(Inner)
+ <-Map 1 [SIMPLE_EDGE]
+ SHUFFLE [RS_6]
+ PartitionCols:'5'
+ Select Operator [SEL_2] (rows=250 width=10)
+ Filter Operator [FIL_13] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_0] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
+ <-Map 3 [SIMPLE_EDGE]
+ SHUFFLE [RS_7]
+ PartitionCols:'5'
+ Select Operator [SEL_5] (rows=250 width=10)
+ Filter Operator [FIL_14] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_3] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
+

http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_mat_3.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_3.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_3.q.out
new file mode 100644
index 0000000..b700d44
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_3.q.out
@@ -0,0 +1,59 @@
+PREHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+
+Stage-3
+ Fetch Operator
+ limit:-1
+ Stage-4
+ Reducer 3
+ File Output Operator [FS_15]
+ Merge Join Operator [MERGEJOIN_20] (rows=1 width=0)
+ Conds:RS_11._col0=RS_12._col0(Inner),Output:["_col0"]
+ <-Map 2 [SIMPLE_EDGE]
+ SHUFFLE [RS_11]
+ PartitionCols:_col0
+ Select Operator [SEL_7] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_18] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_5] (rows=1 width=0)
+ default@q1,a,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ <-Map 4 [SIMPLE_EDGE]
+ SHUFFLE [RS_12]
+ PartitionCols:_col0
+ Select Operator [SEL_10] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_19] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_8] (rows=1 width=0)
+ default@q1,a,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ Stage-2
+ Dependency Collection{}
+ Stage-1
+ Map 1
+ File Output Operator [FS_3]
+ table:{"name:":"default.q1"}
+ Select Operator [SEL_2] (rows=250 width=10)
+ Output:["_col0","_col1"]
+ Filter Operator [FIL_4] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_0] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+ Stage-0
+ Move Operator
+ Please refer to the previous Stage-1
+

http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_mat_4.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_4.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_4.q.out
new file mode 100644
index 0000000..bf13958
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_4.q.out
@@ -0,0 +1,301 @@
+PREHOOK: query: create temporary table q1 (a int, b string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:default
+PREHOOK: Output: default@q1
+POSTHOOK: query: create temporary table q1 (a int, b string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@q1
+PREHOOK: query: insert into q1 values (1, 'A')
+PREHOOK: type: QUERY
+PREHOOK: Input: default@values__tmp__table__1
+PREHOOK: Output: default@q1
+POSTHOOK: query: insert into q1 values (1, 'A')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@values__tmp__table__1
+POSTHOOK: Output: default@q1
+POSTHOOK: Lineage: q1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: q1.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
+q1
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+values__tmp__table__1
+PREHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+
+Stage-3
+ Fetch Operator
+ limit:-1
+ Stage-4
+ Reducer 3
+ File Output Operator [FS_15]
+ Merge Join Operator [MERGEJOIN_20] (rows=1 width=0)
+ Conds:RS_11._col0=RS_12._col0(Inner),Output:["_col0"]
+ <-Map 2 [SIMPLE_EDGE]
+ SHUFFLE [RS_11]
+ PartitionCols:_col0
+ Select Operator [SEL_7] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_18] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_5] (rows=1 width=0)
+ default@q1,a,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ <-Map 4 [SIMPLE_EDGE]
+ SHUFFLE [RS_12]
+ PartitionCols:_col0
+ Select Operator [SEL_10] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_19] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_8] (rows=1 width=0)
+ default@q1,a,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ Stage-2
+ Dependency Collection{}
+ Stage-1
+ Map 1
+ File Output Operator [FS_3]
+ table:{"name:":"default.q1"}
+ Select Operator [SEL_2] (rows=250 width=10)
+ Output:["_col0","_col1"]
+ Filter Operator [FIL_4] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_0] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+ Stage-0
+ Move Operator
+ Please refer to the previous Stage-1
+
+PREHOOK: query: with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@q1
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@q1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@q1
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@q1
+#### A masked pattern was here ####
+5
+5
+5
+5
+5
+5
+5
+5
+5
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
+q1
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+values__tmp__table__1
+PREHOOK: query: select * from q1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@q1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from q1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@q1
+#### A masked pattern was here ####
+1 A
+PREHOOK: query: drop table q1
+PREHOOK: type: DROPTABLE
+PREHOOK: Input: default@q1
+PREHOOK: Output: default@q1
+POSTHOOK: query: drop table q1
+POSTHOOK: type: DROPTABLE
+POSTHOOK: Input: default@q1
+POSTHOOK: Output: default@q1
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+values__tmp__table__1
+PREHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+
+Stage-3
+ Fetch Operator
+ limit:-1
+ Stage-4
+ Reducer 3
+ File Output Operator [FS_15]
+ Merge Join Operator [MERGEJOIN_20] (rows=1 width=0)
+ Conds:RS_11._col0=RS_12._col0(Inner),Output:["_col0"]
+ <-Map 2 [SIMPLE_EDGE]
+ SHUFFLE [RS_11]
+ PartitionCols:_col0
+ Select Operator [SEL_7] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_18] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_5] (rows=1 width=0)
+ default@q1,a,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ <-Map 4 [SIMPLE_EDGE]
+ SHUFFLE [RS_12]
+ PartitionCols:_col0
+ Select Operator [SEL_10] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_19] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_8] (rows=1 width=0)
+ default@q1,a,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ Stage-2
+ Dependency Collection{}
+ Stage-1
+ Map 1
+ File Output Operator [FS_3]
+ table:{"name:":"default.q1"}
+ Select Operator [SEL_2] (rows=250 width=10)
+ Output:["_col0","_col1"]
+ Filter Operator [FIL_4] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_0] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+ Stage-0
+ Move Operator
+ Please refer to the previous Stage-1
+
+PREHOOK: query: with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@q1
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@q1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as (select * from src where key= '5')
+select a.key
+from q1 a join q1 b
+on a.key=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@q1
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@q1
+#### A masked pattern was here ####
+5
+5
+5
+5
+5
+5
+5
+5
+5
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+values__tmp__table__1

http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_mat_5.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/cte_mat_5.q.out b/ql/src/test/results/clientpositive/tez/cte_mat_5.q.out
new file mode 100644
index 0000000..52342c5
--- /dev/null
+++ b/ql/src/test/results/clientpositive/tez/cte_mat_5.q.out
@@ -0,0 +1,149 @@
+PREHOOK: query: create database mydb
+PREHOOK: type: CREATEDATABASE
+PREHOOK: Output: database:mydb
+POSTHOOK: query: create database mydb
+POSTHOOK: type: CREATEDATABASE
+POSTHOOK: Output: database:mydb
+PREHOOK: query: use mydb
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:mydb
+POSTHOOK: query: use mydb
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:mydb
+PREHOOK: query: create table q1 (colnum int, colstring string)
+PREHOOK: type: CREATETABLE
+PREHOOK: Output: database:mydb
+PREHOOK: Output: mydb@q1
+POSTHOOK: query: create table q1 (colnum int, colstring string)
+POSTHOOK: type: CREATETABLE
+POSTHOOK: Output: database:mydb
+POSTHOOK: Output: mydb@q1
+PREHOOK: query: insert into q1 values (5, 'A')
+PREHOOK: type: QUERY
+PREHOOK: Input: mydb@values__tmp__table__1
+PREHOOK: Output: mydb@q1
+POSTHOOK: query: insert into q1 values (5, 'A')
+POSTHOOK: type: QUERY
+POSTHOOK: Input: mydb@values__tmp__table__1
+POSTHOOK: Output: mydb@q1
+POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
+POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
+PREHOOK: query: use default
+PREHOOK: type: SWITCHDATABASE
+PREHOOK: Input: database:default
+POSTHOOK: query: use default
+POSTHOOK: type: SWITCHDATABASE
+POSTHOOK: Input: database:default
+PREHOOK: query: show tables in mydb
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:mydb
+POSTHOOK: query: show tables in mydb
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:mydb
+q1
+values__tmp__table__1
+PREHOOK: query: show tables
+PREHOOK: type: SHOWTABLES
+PREHOOK: Input: database:default
+POSTHOOK: query: show tables
+POSTHOOK: type: SHOWTABLES
+POSTHOOK: Input: database:default
+alltypesorc
+cbo_t1
+cbo_t2
+cbo_t3
+lineitem
+part
+src
+src1
+src_cbo
+src_json
+src_sequencefile
+src_thrift
+srcbucket
+srcbucket2
+srcpart
+PREHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.colnum
+from mydb.q1 as a join q1 as b
+on a.colnum=b.key
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+with q1 as (select * from src where key= '5')
+select a.colnum
+from mydb.q1 as a join q1 as b
+on a.colnum=b.key
+POSTHOOK: type: QUERY
+Plan optimized by CBO.
+
+Vertex dependency in root stage
+Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
+
+Stage-3
+ Fetch Operator
+ limit:-1
+ Stage-4
+ Reducer 3
+ File Output Operator [FS_15]
+ Merge Join Operator [MERGEJOIN_20] (rows=1 width=3)
+ Conds:RS_11.UDFToDouble(_col0)=RS_12.UDFToDouble(_col0)(Inner),Output:["_col0"]
+ <-Map 2 [SIMPLE_EDGE]
+ SHUFFLE [RS_11]
+ PartitionCols:UDFToDouble(_col0)
+ Select Operator [SEL_7] (rows=1 width=3)
+ Output:["_col0"]
+ Filter Operator [FIL_18] (rows=1 width=3)
+ predicate:colnum is not null
+ TableScan [TS_5] (rows=1 width=3)
+ mydb@q1,a,Tbl:COMPLETE,Col:NONE,Output:["colnum"]
+ <-Map 4 [SIMPLE_EDGE]
+ SHUFFLE [RS_12]
+ PartitionCols:UDFToDouble(_col0)
+ Select Operator [SEL_10] (rows=1 width=0)
+ Output:["_col0"]
+ Filter Operator [FIL_19] (rows=1 width=0)
+ predicate:key is not null
+ TableScan [TS_8] (rows=1 width=0)
+ default@q1,b,Tbl:PARTIAL,Col:NONE,Output:["key"]
+ Stage-2
+ Dependency Collection{}
+ Stage-1
+ Map 1
+ File Output Operator [FS_3]
+ table:{"name:":"default.q1"}
+ Select Operator [SEL_2] (rows=250 width=10)
+ Output:["_col0","_col1"]
+ Filter Operator [FIL_4] (rows=250 width=10)
+ predicate:(key = '5')
+ TableScan [TS_0] (rows=500 width=10)
+ default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key","value"]
+ Stage-0
+ Move Operator
+ Please refer to the previous Stage-1
+
+PREHOOK: query: with q1 as (select * from src where key= '5')
+select a.colnum
+from mydb.q1 as a join q1 as b
+on a.colnum=b.key
+PREHOOK: type: QUERY
+PREHOOK: Input: default@q1
+PREHOOK: Input: default@src
+PREHOOK: Input: mydb@q1
+PREHOOK: Output: database:default
+PREHOOK: Output: default@q1
+#### A masked pattern was here ####
+POSTHOOK: query: with q1 as (select * from src where key= '5')
+select a.colnum
+from mydb.q1 as a join q1 as b
+on a.colnum=b.key
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@q1
+POSTHOOK: Input: default@src
+POSTHOOK: Input: mydb@q1
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@q1
+#### A masked pattern was here ####
+5
+5
+5

http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/tez_union.q.out
----------------------------------------------------------------------
diff --git a/ql/src/test/results/clientpositive/tez/tez_union.q.out b/ql/src/test/results/clientpositive/tez/tez_union.q.out
index e5fae03..359d27c 100644
--- a/ql/src/test/results/clientpositive/tez/tez_union.q.out
+++ b/ql/src/test/results/clientpositive/tez/tez_union.q.out
@@ -1405,7 +1405,6 @@ POSTHOOK: Input: default@table1
  POSTHOOK: Input: default@table2
  POSTHOOK: Output: database:default
  POSTHOOK: Output: default@TABLE3
-POSTHOOK: Output: default@table3
  PREHOOK: query: explain formatted select count(*) from TABLE3
  PREHOOK: type: QUERY
  POSTHOOK: query: explain formatted select count(*) from TABLE3


  • Jcamacho at Feb 12, 2016 at 6:41 pm
    HIVE-11752: Pre-materializing complex CTE queries (Navis, Jesus Camacho Rodriguez, reviewed by Laljo John Pullokkaran)


    Project: http://git-wip-us.apache.org/repos/asf/hive/repo
    Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/dca4233d
    Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/dca4233d
    Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/dca4233d

    Branch: refs/heads/master
    Commit: dca4233da112b10f8ada27c392c6ad458288c319
    Parents: 4baf475
    Author: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Authored: Fri Feb 5 11:32:21 2016 +0100
    Committer: Jesus Camacho Rodriguez <jcamacho@apache.org>
    Committed: Fri Feb 12 19:40:32 2016 +0100

    ----------------------------------------------------------------------
      .../org/apache/hadoop/hive/conf/HiveConf.java | 5 +
      .../test/resources/testconfiguration.properties | 20 +
      .../java/org/apache/hadoop/hive/ql/Context.java | 46 +-
      .../java/org/apache/hadoop/hive/ql/Driver.java | 4 +-
      .../org/apache/hadoop/hive/ql/QueryPlan.java | 9 +-
      .../org/apache/hadoop/hive/ql/exec/DDLTask.java | 250 ++------
      .../org/apache/hadoop/hive/ql/exec/Task.java | 16 +
      .../apache/hadoop/hive/ql/metadata/Table.java | 31 +-
      .../hive/ql/optimizer/GenMapRedUtils.java | 7 +-
      .../hive/ql/parse/BaseSemanticAnalyzer.java | 14 +-
      .../hadoop/hive/ql/parse/CalcitePlanner.java | 53 ++
      .../hive/ql/parse/ExplainSemanticAnalyzer.java | 2 +-
      .../hadoop/hive/ql/parse/GenTezProcContext.java | 7 +-
      .../hadoop/hive/ql/parse/SemanticAnalyzer.java | 612 +++++++++++++------
      .../hadoop/hive/ql/parse/TaskCompiler.java | 2 +-
      .../hadoop/hive/ql/plan/CreateTableDesc.java | 195 +++++-
      .../hadoop/hive/ql/plan/FileSinkDesc.java | 9 +
      .../apache/hadoop/hive/ql/stats/StatsUtils.java | 33 +-
      .../hadoop/hive/ql/parse/TestGenTezWork.java | 14 +-
      ql/src/test/queries/clientpositive/cte_3.q | 31 +
      ql/src/test/queries/clientpositive/cte_4.q | 56 ++
      ql/src/test/queries/clientpositive/cte_5.q | 23 +
      ql/src/test/queries/clientpositive/cte_mat_1.q | 8 +
      ql/src/test/queries/clientpositive/cte_mat_2.q | 8 +
      ql/src/test/queries/clientpositive/cte_mat_3.q | 8 +
      ql/src/test/queries/clientpositive/cte_mat_4.q | 39 ++
      ql/src/test/queries/clientpositive/cte_mat_5.q | 23 +
      .../test/results/clientnegative/analyze1.q.out | 2 +-
      .../test/results/clientnegative/dyn_part1.q.out | 2 +-
      .../clientpositive/alter_view_as_select.q.out | 2 -
      ql/src/test/results/clientpositive/cte_3.q.out | 444 ++++++++++++++
      ql/src/test/results/clientpositive/cte_4.q.out | 219 +++++++
      ql/src/test/results/clientpositive/cte_5.q.out | 156 +++++
      .../test/results/clientpositive/cte_mat_1.q.out | 72 +++
      .../test/results/clientpositive/cte_mat_2.q.out | 72 +++
      .../test/results/clientpositive/cte_mat_3.q.out | 147 +++++
      .../test/results/clientpositive/cte_mat_4.q.out | 477 +++++++++++++++
      .../test/results/clientpositive/cte_mat_5.q.out | 238 ++++++++
      .../results/clientpositive/llap/cte_1.q.out | 126 ++++
      .../results/clientpositive/llap/cte_2.q.out | 189 ++++++
      .../results/clientpositive/llap/cte_3.q.out | 294 +++++++++
      .../results/clientpositive/llap/cte_4.q.out | 219 +++++++
      .../results/clientpositive/llap/cte_5.q.out | 168 +++++
      .../results/clientpositive/llap/cte_mat_1.q.out | 83 +++
      .../results/clientpositive/llap/cte_mat_2.q.out | 83 +++
      .../results/clientpositive/llap/cte_mat_3.q.out | 122 ++++
      .../results/clientpositive/llap/cte_mat_4.q.out | 429 +++++++++++++
      .../results/clientpositive/llap/cte_mat_5.q.out | 214 +++++++
      .../results/clientpositive/llap/tez_union.q.out | 1 -
      .../test/results/clientpositive/tez/cte_1.q.out | 111 ++++
      .../test/results/clientpositive/tez/cte_2.q.out | 189 ++++++
      .../test/results/clientpositive/tez/cte_3.q.out | 187 ++++++
      .../test/results/clientpositive/tez/cte_4.q.out | 219 +++++++
      .../test/results/clientpositive/tez/cte_5.q.out | 127 ++++
      .../results/clientpositive/tez/cte_mat_1.q.out | 44 ++
      .../results/clientpositive/tez/cte_mat_2.q.out | 44 ++
      .../results/clientpositive/tez/cte_mat_3.q.out | 59 ++
      .../results/clientpositive/tez/cte_mat_4.q.out | 301 +++++++++
      .../results/clientpositive/tez/cte_mat_5.q.out | 149 +++++
      .../results/clientpositive/tez/tez_union.q.out | 1 -
      60 files changed, 6281 insertions(+), 434 deletions(-)
    ----------------------------------------------------------------------


    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    ----------------------------------------------------------------------
    diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    index cd17a84..3ab1dba 100644
    --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
    @@ -1363,6 +1363,11 @@ public class HiveConf extends Configuration {
              "If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" +
              "would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."),

    +    // CTE
    +    HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1,
    +        "If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" +
    +        "before executing the main query block. -1 will disable this feature."),
    +
          // Indexes
          HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024,
              "Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G

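    As a usage note (not part of the patch): the flag is an ordinary HiveConf
    integer, so it can be set per session with
    "set hive.optimize.cte.materialize.threshold=N;" or programmatically. A
    minimal, hedged Java sketch, where the class name and the threshold value 1
    are illustrative:

      import org.apache.hadoop.hive.conf.HiveConf;

      public class CteThresholdExample {
        public static void main(String[] args) {
          HiveConf conf = new HiveConf();
          // Per the description above: materialize any CTE referenced more
          // than this many times; the default of -1 disables the feature.
          conf.setIntVar(HiveConf.ConfVars.HIVE_CTE_MATERIALIZE_THRESHOLD, 1);
          System.out.println(
              conf.getIntVar(HiveConf.ConfVars.HIVE_CTE_MATERIALIZE_THRESHOLD));
        }
      }
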
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/itests/src/test/resources/testconfiguration.properties
    ----------------------------------------------------------------------
    diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties
    index cbb9743..2d0a1e3 100644
    --- a/itests/src/test/resources/testconfiguration.properties
    +++ b/itests/src/test/resources/testconfiguration.properties
    @@ -100,6 +100,16 @@ minitez.query.files.shared=acid_globallimit.q,\
        cross_product_check_1.q,\
        cross_product_check_2.q,\
        ctas.q,\
    + cte_1.q,\
    + cte_2.q,\
    + cte_3.q,\
    + cte_4.q,\
    + cte_5.q,\
    + cte_mat_1.q,\
    + cte_mat_2.q,\
    + cte_mat_3.q,\
    + cte_mat_4.q,\
    + cte_mat_5.q,\
        custom_input_output_format.q,\
        delete_all_non_partitioned.q,\
        delete_all_partitioned.q,\
    @@ -437,6 +447,16 @@ minitez.query.files=bucket_map_join_tez1.q,\
      minillap.query.files=bucket_map_join_tez1.q,\
        bucket_map_join_tez2.q,\
        constprog_dpp.q,\
    + cte_1.q,\
    + cte_2.q,\
    + cte_3.q,\
    + cte_4.q,\
    + cte_5.q,\
    + cte_mat_1.q,\
    + cte_mat_2.q,\
    + cte_mat_3.q,\
    + cte_mat_4.q,\
    + cte_mat_5.q,\
        dynamic_partition_pruning.q,\
        dynamic_partition_pruning_2.q,\
        hybridgrace_hashjoin_1.q,\

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    index 746456b..6f18c82 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java
    @@ -29,10 +29,9 @@ import java.util.List;
      import java.util.Map;
      import java.util.Random;
      import java.util.concurrent.ConcurrentHashMap;
    +import java.util.concurrent.atomic.AtomicInteger;

      import org.antlr.runtime.TokenRewriteStream;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.ContentSummary;
      import org.apache.hadoop.fs.FileStatus;
    @@ -45,13 +44,15 @@ import org.apache.hadoop.hive.ql.exec.TaskRunner;
      import org.apache.hadoop.hive.ql.hooks.WriteEntity;
      import org.apache.hadoop.hive.ql.io.AcidUtils;
      import org.apache.hadoop.hive.ql.lockmgr.HiveLock;
    -import org.apache.hadoop.hive.ql.lockmgr.HiveLockManager;
      import org.apache.hadoop.hive.ql.lockmgr.HiveLockObj;
      import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
    +import org.apache.hadoop.hive.ql.metadata.Table;
      import org.apache.hadoop.hive.ql.plan.LoadTableDesc;
      import org.apache.hadoop.hive.ql.session.SessionState;
      import org.apache.hadoop.hive.shims.ShimLoader;
      import org.apache.hadoop.util.StringUtils;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;

      /**
       * Context for Semantic Analyzers. Usage: not reusable - construct a new one for
    @@ -108,6 +109,10 @@ public class Context {

        private boolean needLockMgr;

    +  private AtomicInteger sequencer = new AtomicInteger();
    +
    +  private final Map<String, Table> cteTables = new HashMap<String, Table>();
    +
        // Keep track of the mapping from load table desc to the output and the lock
        private final Map<LoadTableDesc, WriteEntity> loadTableOutputMap =
            new HashMap<LoadTableDesc, WriteEntity>();
    @@ -360,6 +365,28 @@ public class Context {
          fsScratchDirs.clear();
        }

    +  /**
    +   * Remove any created directories for CTEs.
    +   */
    +  public void removeMaterializedCTEs() {
    +    // clean CTE tables
    +    for (Table materializedTable : cteTables.values()) {
    +      Path location = materializedTable.getDataLocation();
    +      try {
    +        FileSystem fs = location.getFileSystem(conf);
    +        if (fs.exists(location)) {
    +          fs.delete(location, true);
    +          LOG.info("Removed " + location + " for materialized " + materializedTable.getTableName());
    +        }
    +      } catch (IOException e) {
    +        // ignore
    +        LOG.warn("Error removing " + location + " for materialized " + materializedTable.getTableName() +
    +            ": " + StringUtils.stringifyException(e));
    +      }
    +    }
    +    cteTables.clear();
    +  }
    +
        private String nextPathId() {
          return Integer.toString(pathid++);
        }
    @@ -484,6 +511,7 @@ public class Context {
              LOG.info("Context clear error: " + StringUtils.stringifyException(e));
            }
          }
    +    removeMaterializedCTEs();
          removeScratchDir();
          originalTracker = null;
          setNeedLockMgr(false);
    @@ -717,6 +745,18 @@ public class Context {
          this.cboSucceeded = cboSucceeded;
        }

    +  public Table getMaterializedTable(String cteName) {
    +    return cteTables.get(cteName);
    +  }
    +
    +  public void addMaterializedTable(String cteName, Table table) {
    +    cteTables.put(cteName, table);
    +  }
    +
    +  public AtomicInteger getSequencer() {
    +    return sequencer;
    +  }
    +
        public CompilationOpContext getOpContext() {
          return opContext;
        }

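    Taken together, the Context changes above add a small registry mapping a CTE
    name to its scratch Table, which clear() tears down via
    removeMaterializedCTEs(). A hedged caller-side sketch (the helper class and
    method below are invented for illustration, not part of the patch):

      import org.apache.hadoop.hive.ql.Context;
      import org.apache.hadoop.hive.ql.metadata.Table;

      // Hypothetical helper, for illustration only.
      final class CteRegistryExample {
        // A non-null result means the CTE was materialized and can be scanned
        // like a regular table; null means the caller should expand the CTE
        // inline, as before this patch.
        static Table resolve(Context ctx, String cteName) {
          Table materialized = ctx.getMaterializedTable(cteName);
          // The backing directory is transient: Context.clear() invokes
          // removeMaterializedCTEs(), which deletes each table's data location.
          return materialized;
        }
      }
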
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    index 2163e9b..10bd97b 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java
    @@ -475,7 +475,7 @@ public class Driver implements CommandProcessor {
              sem.analyze(tree, ctx);
              hookCtx.update(sem);
              for (HiveSemanticAnalyzerHook hook : saHooks) {
    - hook.postAnalyze(hookCtx, sem.getRootTasks());
    + hook.postAnalyze(hookCtx, sem.getAllRootTasks());
              }
            } else {
              sem.analyze(tree, ctx);
    @@ -598,7 +598,7 @@ public class Driver implements CommandProcessor {
          ByteArrayOutputStream baos = new ByteArrayOutputStream();
          PrintStream ps = new PrintStream(baos);
          try {
    - List<Task<?>> rootTasks = sem.getRootTasks();
    + List<Task<?>> rootTasks = sem.getAllRootTasks();
            task.getJSONPlan(ps, astTree, rootTasks, sem.getFetchTask(), false, true, true);
            ret = baos.toString();
          } catch (Exception e) {

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
    index 9132a21..4933b34 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryPlan.java
    @@ -116,13 +116,12 @@ public class QueryPlan implements Serializable {
                         HiveOperation operation, Schema resultSchema) {
          this.queryString = queryString;

    - rootTasks = new ArrayList<Task<? extends Serializable>>();
    - this.reducerTimeStatsPerJobList = new ArrayList<ReducerTimeStatsPerJob>();
    - rootTasks.addAll(sem.getRootTasks());
    + rootTasks = new ArrayList<Task<? extends Serializable>>(sem.getAllRootTasks());
    + reducerTimeStatsPerJobList = new ArrayList<ReducerTimeStatsPerJob>();
          fetchTask = sem.getFetchTask();
          // Note that inputs and outputs can be changed when the query gets executed
    - inputs = sem.getInputs();
    - outputs = sem.getOutputs();
    + inputs = sem.getAllInputs();
    + outputs = sem.getAllOutputs();
          linfo = sem.getLineageInfo();
          tableAccessInfo = sem.getTableAccessInfo();
          columnAccessInfo = sem.getColumnAccessInfo();

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    index be6ea63..a81eb18 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
    @@ -18,18 +18,39 @@

      package org.apache.hadoop.hive.ql.exec;

    -import com.google.common.collect.ImmutableList;
    -import com.google.common.collect.Iterables;
    +import static org.apache.commons.lang.StringUtils.join;
    +import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
    +import static org.apache.hadoop.util.StringUtils.stringifyException;
    +
    +import java.io.BufferedWriter;
    +import java.io.DataOutputStream;
    +import java.io.FileNotFoundException;
    +import java.io.IOException;
    +import java.io.OutputStreamWriter;
    +import java.io.Serializable;
    +import java.io.Writer;
    +import java.net.URI;
    +import java.net.URISyntaxException;
    +import java.sql.SQLException;
    +import java.util.AbstractList;
    +import java.util.ArrayList;
    +import java.util.Arrays;
    +import java.util.Collections;
    +import java.util.Comparator;
    +import java.util.HashMap;
    +import java.util.HashSet;
    +import java.util.Iterator;
    +import java.util.LinkedHashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Map.Entry;
    +import java.util.Set;
    +import java.util.SortedSet;
    +import java.util.TreeMap;
    +import java.util.TreeSet;

      import org.apache.commons.lang.StringEscapeUtils;
      import org.apache.commons.lang.StringUtils;
    -import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
    -import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
    -import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
    -import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
    -import org.apache.hadoop.mapreduce.MRJobConfig;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.fs.FSDataOutputStream;
      import org.apache.hadoop.fs.FileStatus;
      import org.apache.hadoop.fs.FileSystem;
    @@ -77,6 +98,8 @@ import org.apache.hadoop.hive.ql.io.AcidUtils;
      import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
      import org.apache.hadoop.hive.ql.io.merge.MergeFileTask;
      import org.apache.hadoop.hive.ql.io.merge.MergeFileWork;
    +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat;
    +import org.apache.hadoop.hive.ql.io.orc.OrcSerde;
      import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe;
      import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateTask;
      import org.apache.hadoop.hive.ql.io.rcfile.truncate.ColumnTruncateWork;
    @@ -91,7 +114,6 @@ import org.apache.hadoop.hive.ql.metadata.CheckResult;
      import org.apache.hadoop.hive.ql.metadata.Hive;
      import org.apache.hadoop.hive.ql.metadata.HiveException;
      import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
    -import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
      import org.apache.hadoop.hive.ql.metadata.HiveUtils;
      import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
      import org.apache.hadoop.hive.ql.metadata.Partition;
    @@ -177,7 +199,6 @@ import org.apache.hadoop.hive.serde.serdeConstants;
      import org.apache.hadoop.hive.serde2.AbstractSerDe;
      import org.apache.hadoop.hive.serde2.Deserializer;
      import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
    -import org.apache.hadoop.hive.serde2.SerDeException;
      import org.apache.hadoop.hive.serde2.SerDeSpec;
      import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe;
      import org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe;
    @@ -194,42 +215,17 @@ import org.apache.hadoop.hive.shims.HadoopShims;
      import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatus;
      import org.apache.hadoop.hive.shims.ShimLoader;
      import org.apache.hadoop.io.IOUtils;
    +import org.apache.hadoop.mapreduce.MRJobConfig;
      import org.apache.hadoop.tools.HadoopArchives;
      import org.apache.hadoop.util.ToolRunner;
      import org.apache.hive.common.util.AnnotationUtils;
      import org.apache.hive.common.util.ReflectionUtil;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;
      import org.stringtemplate.v4.ST;

    -import java.io.BufferedWriter;
    -import java.io.DataOutputStream;
    -import java.io.FileNotFoundException;
    -import java.io.IOException;
    -import java.io.OutputStreamWriter;
    -import java.io.Serializable;
    -import java.io.Writer;
    -import java.net.URI;
    -import java.net.URISyntaxException;
    -import java.sql.SQLException;
    -import java.util.AbstractList;
    -import java.util.ArrayList;
    -import java.util.Arrays;
    -import java.util.Collections;
    -import java.util.Comparator;
    -import java.util.HashMap;
    -import java.util.HashSet;
    -import java.util.Iterator;
    -import java.util.LinkedHashMap;
    -import java.util.List;
    -import java.util.Map;
    -import java.util.Map.Entry;
    -import java.util.Set;
    -import java.util.SortedSet;
    -import java.util.TreeMap;
    -import java.util.TreeSet;
    -
    -import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE;
    -import static org.apache.commons.lang.StringUtils.join;
    -import static org.apache.hadoop.util.StringUtils.stringifyException;
    +import com.google.common.collect.ImmutableList;
    +import com.google.common.collect.Iterables;

      /**
       * DDLTask implementation.
    @@ -3777,10 +3773,14 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
          return true;
        }

    + private void validateSerDe(String serdeName) throws HiveException {
    + validateSerDe(serdeName, conf);
    + }
    +
        /**
         * Check if the given serde is valid.
         */
    - private void validateSerDe(String serdeName) throws HiveException {
    + public static void validateSerDe(String serdeName, HiveConf conf) throws HiveException {
          try {

            Deserializer d = ReflectionUtil.newInstance(conf.getClassByName(serdeName).
    @@ -3891,161 +3891,9 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         */
        private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException {
          // create the table
    - Table tbl;
    - if (crtTbl.getDatabaseName() == null || (crtTbl.getTableName().contains("."))){
    - tbl = db.newTable(crtTbl.getTableName());
    - }else {
    - tbl = new Table(crtTbl.getDatabaseName(),crtTbl.getTableName());
    - }
    -
    - if (crtTbl.getTblProps() != null) {
    - tbl.getTTable().getParameters().putAll(crtTbl.getTblProps());
    - }
    -
    - if (crtTbl.getPartCols() != null) {
    - tbl.setPartCols(crtTbl.getPartCols());
    - }
    - if (crtTbl.getNumBuckets() != -1) {
    - tbl.setNumBuckets(crtTbl.getNumBuckets());
    - }
    -
    - if (crtTbl.getStorageHandler() != null) {
    - tbl.setProperty(META_TABLE_STORAGE, crtTbl.getStorageHandler());
    - }
    - HiveStorageHandler storageHandler = tbl.getStorageHandler();
    -
    - /*
    - * We use LazySimpleSerDe by default.
    - *
    - * If the user didn't specify a SerDe, and any of the columns are not simple
    - * types, we will have to use DynamicSerDe instead.
    - */
    - if (crtTbl.getSerName() == null) {
    - if (storageHandler == null) {
    - LOG.info("Default to LazySimpleSerDe for table " + crtTbl.getTableName());
    - tbl.setSerializationLib(org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName());
    - } else {
    - String serDeClassName = storageHandler.getSerDeClass().getName();
    - LOG.info("Use StorageHandler-supplied " + serDeClassName
    - + " for table " + crtTbl.getTableName());
    - tbl.setSerializationLib(serDeClassName);
    - }
    - } else {
    - // let's validate that the serde exists
    - validateSerDe(crtTbl.getSerName());
    - tbl.setSerializationLib(crtTbl.getSerName());
    - }
    -
    - if (crtTbl.getFieldDelim() != null) {
    - tbl.setSerdeParam(serdeConstants.FIELD_DELIM, crtTbl.getFieldDelim());
    - tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, crtTbl.getFieldDelim());
    - }
    - if (crtTbl.getFieldEscape() != null) {
    - tbl.setSerdeParam(serdeConstants.ESCAPE_CHAR, crtTbl.getFieldEscape());
    - }
    -
    - if (crtTbl.getCollItemDelim() != null) {
    - tbl.setSerdeParam(serdeConstants.COLLECTION_DELIM, crtTbl.getCollItemDelim());
    - }
    - if (crtTbl.getMapKeyDelim() != null) {
    - tbl.setSerdeParam(serdeConstants.MAPKEY_DELIM, crtTbl.getMapKeyDelim());
    - }
    - if (crtTbl.getLineDelim() != null) {
    - tbl.setSerdeParam(serdeConstants.LINE_DELIM, crtTbl.getLineDelim());
    - }
    - if (crtTbl.getNullFormat() != null) {
    - tbl.setSerdeParam(serdeConstants.SERIALIZATION_NULL_FORMAT, crtTbl.getNullFormat());
    - }
    - if (crtTbl.getSerdeProps() != null) {
    - Iterator<Entry<String, String>> iter = crtTbl.getSerdeProps().entrySet()
    - .iterator();
    - while (iter.hasNext()) {
    - Entry<String, String> m = iter.next();
    - tbl.setSerdeParam(m.getKey(), m.getValue());
    - }
    - }
    -
    - if (crtTbl.getCols() != null) {
    - tbl.setFields(crtTbl.getCols());
    - }
    - if (crtTbl.getBucketCols() != null) {
    - tbl.setBucketCols(crtTbl.getBucketCols());
    - }
    - if (crtTbl.getSortCols() != null) {
    - tbl.setSortCols(crtTbl.getSortCols());
    - }
    - if (crtTbl.getComment() != null) {
    - tbl.setProperty("comment", crtTbl.getComment());
    - }
    - if (crtTbl.getLocation() != null) {
    - tbl.setDataLocation(new Path(crtTbl.getLocation()));
    - }
    -
    - if (crtTbl.getSkewedColNames() != null) {
    - tbl.setSkewedColNames(crtTbl.getSkewedColNames());
    - }
    - if (crtTbl.getSkewedColValues() != null) {
    - tbl.setSkewedColValues(crtTbl.getSkewedColValues());
    - }
    -
    - tbl.getTTable().setTemporary(crtTbl.isTemporary());
    -
    - tbl.setStoredAsSubDirectories(crtTbl.isStoredAsSubDirectories());
    -
    - tbl.setInputFormatClass(crtTbl.getInputFormat());
    - tbl.setOutputFormatClass(crtTbl.getOutputFormat());
    -
    - // only persist input/output format to metadata when it is explicitly specified.
    - // Otherwise, load lazily via StorageHandler at query time.
    - if (crtTbl.getInputFormat() != null && !crtTbl.getInputFormat().isEmpty()) {
    - tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName());
    - }
    - if (crtTbl.getOutputFormat() != null && !crtTbl.getOutputFormat().isEmpty()) {
    - tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName());
    - }
    -
    - if (!Utilities.isDefaultNameNode(conf) && doesTableNeedLocation(tbl)) {
    - // If location is specified - ensure that it is a full qualified name
    - makeLocationQualified(tbl.getDbName(), tbl.getTTable().getSd(), tbl.getTableName());
    - }
    -
    - if (crtTbl.isExternal()) {
    - tbl.setProperty("EXTERNAL", "TRUE");
    - tbl.setTableType(TableType.EXTERNAL_TABLE);
    - }
    -
    - // If the sorted columns is a superset of bucketed columns, store this fact.
    - // It can be later used to
    - // optimize some group-by queries. Note that, the order does not matter as
    - // long as it in the first
    - // 'n' columns where 'n' is the length of the bucketed columns.
    - if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null)) {
    - List<String> bucketCols = tbl.getBucketCols();
    - List<Order> sortCols = tbl.getSortCols();
    -
    - if ((sortCols.size() > 0) && (sortCols.size() >= bucketCols.size())) {
    - boolean found = true;
    -
    - Iterator<String> iterBucketCols = bucketCols.iterator();
    - while (iterBucketCols.hasNext()) {
    - String bucketCol = iterBucketCols.next();
    - boolean colFound = false;
    - for (int i = 0; i < bucketCols.size(); i++) {
    - if (bucketCol.equals(sortCols.get(i).getCol())) {
    - colFound = true;
    - break;
    - }
    - }
    - if (colFound == false) {
    - found = false;
    - break;
    - }
    - }
    - if (found) {
    - tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE");
    - }
    - }
    - }
    + Table tbl = crtTbl.toTable(conf);
    + LOG.info("creating table " + tbl.getDbName() + "." + tbl.getTableName() + " on " +
    + tbl.getDataLocation());

          // create the table
          if (crtTbl.getReplaceMode()){
    @@ -4194,7 +4042,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {

          if (!Utilities.isDefaultNameNode(conf)) {
            // If location is specified - ensure that it is a full qualified name
    - makeLocationQualified(tbl.getDbName(), tbl.getTTable().getSd(), tbl.getTableName());
    + makeLocationQualified(tbl.getDbName(), tbl.getTTable().getSd(), tbl.getTableName(), conf);
          }

          // create the table
    @@ -4409,8 +4257,8 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
         * @param name
         * Object name.
         */
    - private void makeLocationQualified(String databaseName, StorageDescriptor sd, String name)
    - throws HiveException {
    + public static void makeLocationQualified(String databaseName, StorageDescriptor sd,
    + String name, HiveConf conf) throws HiveException {
          Path path = null;
          if (!sd.isSetLocation())
          {
    @@ -4483,7 +4331,7 @@ public class DDLTask extends Task<DDLWork> implements Serializable {
          }
        }

    - private static boolean doesTableNeedLocation(Table tbl) {
    + public static boolean doesTableNeedLocation(Table tbl) {
          // If we are ok with breaking compatibility of existing 3rd party StorageHandlers,
          // this method could be moved to the HiveStorageHandler interface.
          boolean retval = true;

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    index 40c89cb..f2b15c5 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Task.java
    @@ -295,6 +295,22 @@ public abstract class Task<T extends Serializable> implements Serializable, Node
          return ret;
        }

    +  @SuppressWarnings("unchecked")
    +  public static List<Task<? extends Serializable>>
    +      findLeafs(List<Task<? extends Serializable>> rootTasks) {
    +    final List<Task<? extends Serializable>> leafTasks = new ArrayList<Task<?>>();
    +
    +    NodeUtils.iterateTask(rootTasks, Task.class, new NodeUtils.Function<Task>() {
    +      public void apply(Task task) {
    +        List dependents = task.getDependentTasks();
    +        if (dependents == null || dependents.isEmpty()) {
    +          leafTasks.add(task);
    +        }
    +      }
    +    });
    +    return leafTasks;
    +  }
    +
        /**
         * Remove the dependent task.
         *

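    A short sketch of how the new static helper can be used to chain two plans,
    e.g. making a consumer plan start only after a materialization plan has
    fully finished (the chain() helper is hypothetical; the actual wiring lives
    in the SemanticAnalyzer changes, which are not shown in this excerpt):

      import java.io.Serializable;
      import java.util.List;

      import org.apache.hadoop.hive.ql.exec.Task;

      final class TaskChainingExample {
        // Make every task in nextRoots depend on every leaf of rootTasks, so
        // the second plan only runs once the first has completed.
        static void chain(List<Task<? extends Serializable>> rootTasks,
            List<Task<? extends Serializable>> nextRoots) {
          for (Task<? extends Serializable> leaf : Task.findLeafs(rootTasks)) {
            for (Task<? extends Serializable> next : nextRoots) {
              leaf.addDependentTask(next);
            }
          }
        }
      }
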
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    index a5217eb..5a72af1 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
    @@ -18,9 +18,16 @@

      package org.apache.hadoop.hive.ql.metadata;

    +import java.io.Serializable;
    +import java.util.ArrayList;
    +import java.util.Arrays;
    +import java.util.HashMap;
    +import java.util.LinkedHashMap;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Properties;
    +
      import org.apache.commons.lang3.StringUtils;
    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.FileStatus;
      import org.apache.hadoop.fs.FileSystem;
    @@ -57,15 +64,8 @@ import org.apache.hadoop.mapred.InputFormat;
      import org.apache.hadoop.mapred.OutputFormat;
      import org.apache.hadoop.mapred.SequenceFileInputFormat;
      import org.apache.hive.common.util.ReflectionUtil;
    -
    -import java.io.Serializable;
    -import java.util.ArrayList;
    -import java.util.Arrays;
    -import java.util.HashMap;
    -import java.util.LinkedHashMap;
    -import java.util.List;
    -import java.util.Map;
    -import java.util.Properties;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;

      /**
       * A Hive Table: is a fundamental unit of data in Hive that shares a common schema/DDL.
    @@ -94,6 +94,7 @@ public class Table implements Serializable {

        private transient TableSpec tableSpec;

    +  private transient boolean materializedTable;

        /**
         * Used only for serialization.
    @@ -338,6 +339,14 @@ public class Table implements Serializable {
          return outputFormatClass;
        }

    +  public boolean isMaterializedTable() {
    +    return materializedTable;
    +  }
    +
    +  public void setMaterializedTable(boolean materializedTable) {
    +    this.materializedTable = materializedTable;
    +  }
    +
        /**
         * Marker SemanticException, so that processing that allows for table validation failures
         * and appropriately handles them can recover from these types of SemanticExceptions

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    index 46bf04c..812af9a 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
    @@ -34,8 +34,6 @@ import java.util.Map.Entry;
      import java.util.Properties;
      import java.util.Set;

    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.conf.HiveConf;
    @@ -115,6 +113,8 @@ import org.apache.hadoop.hive.serde2.SerDeException;
      import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
      import org.apache.hadoop.mapred.InputFormat;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;

      import com.google.common.base.Preconditions;
      import com.google.common.collect.Interner;
    @@ -1742,7 +1742,8 @@ public final class GenMapRedUtils {
            // no need of merging if the move is to a local file system
            MoveTask mvTask = (MoveTask) GenMapRedUtils.findMoveTask(mvTasks, fsOp);

    - if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)) {
    + if (mvTask != null && isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) &&
    + !fsOp.getConf().isMaterialization()) {
              GenMapRedUtils.addStatsTask(fsOp, mvTask, currTask, hconf);
            }


    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    index 36c1259..28c8fdb 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
    @@ -110,7 +110,7 @@ public abstract class BaseSemanticAnalyzer {
        public static int HIVE_COLUMN_ORDER_DESC = 0;

        /**
    - * ReadEntitites that are passed to the hooks.
    + * ReadEntities that are passed to the hooks.
         */
        protected HashSet<ReadEntity> inputs;
        /**
    @@ -1477,4 +1477,16 @@ public abstract class BaseSemanticAnalyzer {
        protected String toMessage(ErrorMsg message, Object detail) {
          return detail == null ? message.getMsg() : message.getMsg(detail.toString());
        }
    +
    +  public List<Task<? extends Serializable>> getAllRootTasks() {
    +    return rootTasks;
    +  }
    +
    +  public HashSet<ReadEntity> getAllInputs() {
    +    return inputs;
    +  }
    +
    +  public HashSet<WriteEntity> getAllOutputs() {
    +    return outputs;
    +  }
      }

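    These getAll* accessors are the seam the rest of the patch plugs into:
    Driver, QueryPlan and ExplainSemanticAnalyzer now call
    getAllRootTasks()/getAllInputs()/getAllOutputs() instead of the plain
    getters, so a subclass can widen the answer to cover CTE materialization
    sub-plans. The SemanticAnalyzer override itself is not shown in this
    excerpt; the sketch below only illustrates the aggregation idea under that
    assumption (the helper class is invented):

      import java.io.Serializable;
      import java.util.ArrayList;
      import java.util.List;

      import org.apache.hadoop.hive.ql.exec.Task;
      import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;

      final class AllRootTasksExample {
        // Union of the main plan's root tasks with those of each CTE
        // sub-analyzer, so callers see one complete task forest.
        static List<Task<? extends Serializable>> allRoots(
            BaseSemanticAnalyzer main, List<BaseSemanticAnalyzer> cteSources) {
          List<Task<? extends Serializable>> all =
              new ArrayList<Task<? extends Serializable>>(main.getAllRootTasks());
          for (BaseSemanticAnalyzer cteSource : cteSources) {
            all.addAll(cteSource.getAllRootTasks());
          }
          return all;
        }
      }
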
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    index e2d404b..e7dc08c 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
    @@ -17,6 +17,7 @@
       */
      package org.apache.hadoop.hive.ql.parse;

    +import java.io.IOException;
      import java.lang.reflect.Field;
      import java.lang.reflect.InvocationTargetException;
      import java.lang.reflect.UndeclaredThrowableException;
    @@ -36,6 +37,7 @@ import java.util.Map;
      import java.util.Set;
      import java.util.concurrent.atomic.AtomicInteger;

    +import org.antlr.runtime.ClassicToken;
      import org.antlr.runtime.tree.TreeVisitor;
      import org.antlr.runtime.tree.TreeVisitorAction;
      import org.apache.calcite.plan.RelOptCluster;
    @@ -96,6 +98,7 @@ import org.apache.calcite.util.CompositeList;
      import org.apache.calcite.util.ImmutableBitSet;
      import org.apache.calcite.util.ImmutableIntList;
      import org.apache.calcite.util.Pair;
    +import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
      import org.apache.hadoop.hive.conf.HiveConf.StrictChecks;
    @@ -110,6 +113,7 @@ import org.apache.hadoop.hive.ql.exec.OperatorFactory;
      import org.apache.hadoop.hive.ql.exec.RowSchema;
      import org.apache.hadoop.hive.ql.lib.Node;
      import org.apache.hadoop.hive.ql.log.PerfLogger;
    +import org.apache.hadoop.hive.ql.metadata.HiveException;
      import org.apache.hadoop.hive.ql.metadata.Table;
      import org.apache.hadoop.hive.ql.metadata.VirtualColumn;
      import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException;
    @@ -185,6 +189,7 @@ import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
      import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils;
      import org.apache.hadoop.hive.ql.plan.GroupByDesc;
    +import org.apache.hadoop.hive.ql.plan.HiveOperation;
      import org.apache.hadoop.hive.ql.plan.SelectDesc;
      import org.apache.hadoop.hive.ql.session.SessionState;
      import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator;
    @@ -496,6 +501,54 @@ public class CalcitePlanner extends SemanticAnalyzer {
        }

        @Override
    + Table materializeCTE(String cteName, CTEClause cte) throws HiveException {
    +
    + ASTNode createTable = new ASTNode(new ClassicToken(HiveParser.TOK_CREATETABLE));
    +
    + ASTNode tableName = new ASTNode(new ClassicToken(HiveParser.TOK_TABNAME));
    + tableName.addChild(new ASTNode(new ClassicToken(HiveParser.Identifier, cteName)));
    +
    + ASTNode temporary = new ASTNode(new ClassicToken(HiveParser.KW_TEMPORARY, MATERIALIZATION_MARKER));
    +
    + createTable.addChild(tableName);
    + createTable.addChild(temporary);
    + createTable.addChild(cte.cteNode);
    +
    + CalcitePlanner analyzer = new CalcitePlanner(conf);
    + analyzer.initCtx(ctx);
    + analyzer.init(false);
    +
    + // should share cte contexts
    + analyzer.aliasToCTEs.putAll(aliasToCTEs);
    +
    + HiveOperation operation = SessionState.get().getHiveOperation();
    + try {
    + analyzer.analyzeInternal(createTable);
    + } finally {
    + SessionState.get().setCommandType(operation);
    + }
    +
    + Table table = analyzer.tableDesc.toTable(conf);
    + Path location = table.getDataLocation();
    + try {
    + location.getFileSystem(conf).mkdirs(location);
    + } catch (IOException e) {
    + throw new HiveException(e);
    + }
    + table.setMaterializedTable(true);
    +
    + LOG.info(cteName + " will be materialized into " + location);
    + cte.table = table;
    + cte.source = analyzer;
    +
    + ctx.addMaterializedTable(cteName, table);
    + // For CalcitePlanner, store qualified name too
    + ctx.addMaterializedTable(table.getDbName() + "." + table.getTableName(), table);
    +
    + return table;
    + }
    +
    + @Override
        String fixCtasColumnName(String colName) {
          if (runCBO) {
            int lastDot = colName.lastIndexOf('.');

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
    index e1e3eb2..e393be2 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java
    @@ -75,7 +75,7 @@ public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer {
          sem.validate();

          ctx.setResFile(ctx.getLocalTmpPath());
    - List<Task<? extends Serializable>> tasks = sem.getRootTasks();
    + List<Task<? extends Serializable>> tasks = sem.getAllRootTasks();
          if (tasks == null) {
            tasks = Collections.emptyList();
          }

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
    index ec5ef0e..0c160ac 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java
    @@ -26,6 +26,7 @@ import java.util.LinkedList;
      import java.util.List;
      import java.util.Map;
      import java.util.Set;
    +import java.util.concurrent.atomic.AtomicInteger;

      import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.conf.HiveConf;
    @@ -84,7 +85,7 @@ public class GenTezProcContext implements NodeProcessorCtx{
        public Operator<? extends OperatorDesc> parentOfRoot;

        // sequence number is used to name vertices (e.g.: Map 1, Reduce 14, ...)
    - private int sequenceNumber = 0;
    + private AtomicInteger sequenceNumber;

        // tez task we're currently processing
        public TezTask currentTask;
    @@ -200,12 +201,12 @@ public class GenTezProcContext implements NodeProcessorCtx{
          this.opMergeJoinWorkMap = new LinkedHashMap<Operator<?>, MergeJoinWork>();
          this.currentMergeJoinOperator = null;
          this.mapJoinToUnprocessedSmallTableReduceSinks = new HashMap<MapJoinOperator, Set<ReduceSinkOperator>>();
    + this.sequenceNumber = parseContext.getContext().getSequencer();

          rootTasks.add(currentTask);
        }

    - /** Not thread-safe. */
        public int nextSequenceNumber() {
    - return ++sequenceNumber;
    + return sequenceNumber.incrementAndGet();
        }
      }
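
Why the sequencer becomes an AtomicInteger owned by the Context: each materialized CTE is compiled by its own analyzer, and therefore its own GenTezProcContext, yet all of them must hand out unique vertex names within a single query. Sharing one counter (obtained above via parseContext.getContext().getSequencer()) keeps "Map 1", "Reducer 2", ... globally unique. A self-contained sketch; SequencerDemo is hypothetical:

import java.util.concurrent.atomic.AtomicInteger;

public class SequencerDemo {
  public static void main(String[] args) {
    AtomicInteger sequencer = new AtomicInteger(); // one per query Context
    // the main query names two vertices
    System.out.println("Map " + sequencer.incrementAndGet());     // Map 1
    System.out.println("Reducer " + sequencer.incrementAndGet()); // Reducer 2
    // a materialized CTE compiled through a second proc context keeps counting
    System.out.println("Map " + sequencer.incrementAndGet());     // Map 3
  }
}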

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    index f8a5dcd..8a06582 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
    @@ -221,6 +221,8 @@ import org.apache.hadoop.mapred.InputFormat;
      import org.apache.hadoop.mapred.OutputFormat;
      import org.apache.hadoop.security.UserGroupInformation;

    +import com.google.common.collect.Sets;
    +
      /**
       * Implementation of the semantic analyzer. It generates the query plan.
       * There are other specific semantic analyzers for some hive operations such as
    @@ -239,6 +241,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {

        private static final String VALUES_TMP_TABLE_NAME_PREFIX = "Values__Tmp__Table__";

    + static final String MATERIALIZATION_MARKER = "$MATERIALIZATION";
    +
        private HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner;
        private HashMap<TableScanOperator, PrunedPartitionList> opToPartList;
        protected HashMap<String, TableScanOperator> topOps;
    @@ -294,17 +298,24 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
        /*
         * Capture the CTE definitions in a Query.
         */
    - private final Map<String, ASTNode> aliasToCTEs;
    + final Map<String, CTEClause> aliasToCTEs;
    +
        /*
         * Used to check recursive CTE invocations. Similar to viewsExpanded
         */
    - private ArrayList<String> ctesExpanded;
    + ArrayList<String> ctesExpanded;
    +
    + /*
    + * Whether the root tasks have been resolved after linking in the materialized CTE tasks
    + */
    + boolean rootTasksResolved;
    +
    + CreateTableDesc tableDesc;

        /** Not thread-safe. */
        final ASTSearcher astSearcher = new ASTSearcher();

        protected AnalyzeRewriteContext analyzeRewrite;
    - private CreateTableDesc tableDesc;

        static class Phase1Ctx {
          String dest;
    @@ -341,7 +352,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
              HiveConf.ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME);
          queryProperties = new QueryProperties();
          opToPartToSkewedPruner = new HashMap<TableScanOperator, Map<String, ExprNodeDesc>>();
    - aliasToCTEs = new HashMap<String, ASTNode>();
    + aliasToCTEs = new HashMap<String, CTEClause>();
          globalLimitCtx = new GlobalLimitCtx();
          viewAliasToInput = new HashMap<String, ReadEntity>();
          noscan = partialscan = false;
    @@ -961,7 +972,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
            if ( aliasToCTEs.containsKey(qName)) {
              throw new SemanticException(ErrorMsg.AMBIGUOUS_TABLE_ALIAS.getMsg(cte.getChild(1)));
            }
    - aliasToCTEs.put(qName, cteQry);
    + aliasToCTEs.put(qName, new CTEClause(qName, cteQry));
          }
        }

    @@ -976,7 +987,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         * they appear in when adding them to the <code>aliasToCTEs</code> map.
         *
         */
    - private ASTNode findCTEFromName(QB qb, String cteName) {
    + private CTEClause findCTEFromName(QB qb, String cteName) {
          StringBuilder qId = new StringBuilder();
          if (qb.getId() != null) {
            qId.append(qb.getId());
    @@ -984,8 +995,9 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {

          while (qId.length() > 0) {
            String nm = qId + ":" + cteName;
    - if (aliasToCTEs.containsKey(nm)) {
    - return aliasToCTEs.get(nm);
    + CTEClause cte = aliasToCTEs.get(nm);
    + if (cte != null) {
    + return cte;
            }
            int lastIndex = qId.lastIndexOf(":");
            lastIndex = lastIndex < 0 ? 0 : lastIndex;
    @@ -1005,14 +1017,180 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
         * - trigger phase 1 on new QBExpr.
         * - update QB data structs: remove this as a table reference, move it to a SQ invocation.
         */
    - private void addCTEAsSubQuery(QB qb, String cteName, String cteAlias) throws SemanticException {
    + private void addCTEAsSubQuery(QB qb, String cteName, String cteAlias)
    + throws SemanticException {
          cteAlias = cteAlias == null ? cteName : cteAlias;
    - ASTNode cteQryNode = findCTEFromName(qb, cteName);
    + CTEClause cte = findCTEFromName(qb, cteName);
    + ASTNode cteQryNode = cte.cteNode;
          QBExpr cteQBExpr = new QBExpr(cteAlias);
          doPhase1QBExpr(cteQryNode, cteQBExpr, qb.getId(), cteAlias);
          qb.rewriteCTEToSubq(cteAlias, cteName, cteQBExpr);
        }

    + private final CTEClause rootClause = new CTEClause(null, null);
    +
    + @Override
    + public List<Task<? extends Serializable>> getAllRootTasks() {
    + if (!rootTasksResolved) {
    + rootTasks = toRealRootTasks(rootClause.asExecutionOrder());
    + rootTasksResolved = true;
    + }
    + return rootTasks;
    + }
    +
    + @Override
    + public HashSet<ReadEntity> getAllInputs() {
    + HashSet<ReadEntity> readEntities = new HashSet<ReadEntity>(getInputs());
    + for (CTEClause cte : rootClause.asExecutionOrder()) {
    + if (cte.source != null) {
    + readEntities.addAll(cte.source.getInputs());
    + }
    + }
    + return readEntities;
    + }
    +
    + @Override
    + public HashSet<WriteEntity> getAllOutputs() {
    + HashSet<WriteEntity> writeEntities = new HashSet<WriteEntity>(getOutputs());
    + for (CTEClause cte : rootClause.asExecutionOrder()) {
    + if (cte.source != null) {
    + writeEntities.addAll(cte.source.getOutputs());
    + }
    + }
    + return writeEntities;
    + }
    +
    + class CTEClause {
    + CTEClause(String alias, ASTNode cteNode) {
    + this.alias = alias;
    + this.cteNode = cteNode;
    + }
    + String alias;
    + ASTNode cteNode;
    + boolean materialize;
    + int reference;
    + QBExpr qbExpr;
    + List<CTEClause> parents = new ArrayList<CTEClause>();
    +
    + // materialized
    + Table table;
    + SemanticAnalyzer source;
    +
    + List<Task<? extends Serializable>> getTasks() {
    + return source == null ? null : source.rootTasks;
    + }
    +
    + List<CTEClause> asExecutionOrder() {
    + List<CTEClause> execution = new ArrayList<CTEClause>();
    + asExecutionOrder(new HashSet<CTEClause>(), execution);
    + return execution;
    + }
    +
    + void asExecutionOrder(Set<CTEClause> visited, List<CTEClause> execution) {
    + for (CTEClause parent : parents) {
    + if (visited.add(parent)) {
    + parent.asExecutionOrder(visited, execution);
    + }
    + }
    + execution.add(this);
    + }
    +
    + @Override
    + public String toString() {
    + return alias == null ? "<root>" : alias;
    + }
    + }
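
asExecutionOrder() is a post-order DFS over the parents lists (here "parents" are the CTEs a clause references), so every clause is emitted after its dependencies. A standalone sketch of the traversal; Node is a hypothetical stand-in for CTEClause. For root -> q1 -> q2 the order is [q2, q1, root]:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class Node {
  final String name;
  final List<Node> parents = new ArrayList<>();
  Node(String name) { this.name = name; }

  List<Node> asExecutionOrder() {
    List<Node> order = new ArrayList<>();
    visit(new HashSet<Node>(), order);
    return order;
  }

  private void visit(Set<Node> visited, List<Node> order) {
    for (Node parent : parents) {
      if (visited.add(parent)) {
        parent.visit(visited, order); // dependencies first
      }
    }
    order.add(this);
  }

  @Override
  public String toString() { return name; }

  public static void main(String[] args) {
    Node q2 = new Node("q2"), q1 = new Node("q1"), root = new Node("<root>");
    q1.parents.add(q2);   // q1 selects from q2
    root.parents.add(q1); // the main query selects from q1
    System.out.println(root.asExecutionOrder()); // [q2, q1, <root>]
  }
}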
    +
    + private List<Task<? extends Serializable>> toRealRootTasks(List<CTEClause> execution) {
    + List<Task<? extends Serializable>> cteRoots = new ArrayList<>();
    + List<Task<? extends Serializable>> cteLeafs = new ArrayList<>();
    + List<Task<? extends Serializable>> curTopRoots = null;
    + List<Task<? extends Serializable>> curBottomLeafs = null;
    + for (int i = 0; i < execution.size(); i++) {
    + CTEClause current = execution.get(i);
    + if (current.parents.isEmpty() && curTopRoots != null) {
    + cteRoots.addAll(curTopRoots);
    + cteLeafs.addAll(curBottomLeafs);
    + curTopRoots = curBottomLeafs = null;
    + }
    + List<Task<? extends Serializable>> curTasks = current.getTasks();
    + if (curTasks == null) {
    + continue;
    + }
    + if (curTopRoots == null) {
    + curTopRoots = curTasks;
    + }
    + if (curBottomLeafs != null) {
    + for (Task<?> topLeafTask : curBottomLeafs) {
    + for (Task<?> currentRootTask : curTasks) {
    + topLeafTask.addDependentTask(currentRootTask);
    + }
    + }
    + }
    + curBottomLeafs = Task.findLeafs(curTasks);
    + }
    + if (curTopRoots != null) {
    + cteRoots.addAll(curTopRoots);
    + cteLeafs.addAll(curBottomLeafs);
    + }
    +
    + if (cteRoots.isEmpty()) {
    + return rootTasks;
    + }
    + for (Task<?> cteLeafTask : cteLeafs) {
    + for (Task<?> mainRootTask : rootTasks) {
    + cteLeafTask.addDependentTask(mainRootTask);
    + }
    + }
    + return cteRoots;
    + }
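
toRealRootTasks() then turns that order into task dependencies: the leaf tasks of each materialized clause gain the next clause's root tasks as dependents, the last set of CTE leaves is wired in front of the main query's root tasks, and the first materialization's roots are handed back to the driver as the new entry points. If nothing was materialized, the original rootTasks come back untouched. A toy sketch of the wiring; MiniTask is hypothetical, while the real code uses Task.addDependentTask and Task.findLeafs:

import java.util.ArrayList;
import java.util.List;

class MiniTask {
  final String name;
  final List<MiniTask> dependents = new ArrayList<>();
  MiniTask(String name) { this.name = name; }
  void addDependentTask(MiniTask t) { dependents.add(t); }
}

public class ChainDemo {
  public static void main(String[] args) {
    MiniTask q2 = new MiniTask("materialize q2");
    MiniTask q1 = new MiniTask("materialize q1");
    MiniTask main = new MiniTask("main query");
    q2.addDependentTask(q1);   // q1 starts only after q2 is materialized
    q1.addDependentTask(main); // the main query runs last
    // q2 is handed back as the new root task
    System.out.println(q2.name + " -> " + q1.name + " -> " + main.name);
  }
}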
    +
    + Table materializeCTE(String cteName, CTEClause cte) throws HiveException {
    +
    + ASTNode createTable = new ASTNode(new ClassicToken(HiveParser.TOK_CREATETABLE));
    +
    + ASTNode tableName = new ASTNode(new ClassicToken(HiveParser.TOK_TABNAME));
    + tableName.addChild(new ASTNode(new ClassicToken(HiveParser.Identifier, cteName)));
    +
    + ASTNode temporary = new ASTNode(new ClassicToken(HiveParser.KW_TEMPORARY, MATERIALIZATION_MARKER));
    +
    + createTable.addChild(tableName);
    + createTable.addChild(temporary);
    + createTable.addChild(cte.cteNode);
    +
    + SemanticAnalyzer analyzer = new SemanticAnalyzer(conf);
    + analyzer.initCtx(ctx);
    + analyzer.init(false);
    +
    + // should share cte contexts
    + analyzer.aliasToCTEs.putAll(aliasToCTEs);
    +
    + HiveOperation operation = SessionState.get().getHiveOperation();
    + try {
    + analyzer.analyzeInternal(createTable);
    + } finally {
    + SessionState.get().setCommandType(operation);
    + }
    +
    + Table table = analyzer.tableDesc.toTable(conf);
    + Path location = table.getDataLocation();
    + try {
    + location.getFileSystem(conf).mkdirs(location);
    + } catch (IOException e) {
    + throw new HiveException(e);
    + }
    + table.setMaterializedTable(true);
    +
    + LOG.info(cteName + " will be materialized into " + location);
    + cte.table = table;
    + cte.source = analyzer;
    +
    + ctx.addMaterializedTable(cteName, table);
    +
    + return table;
    + }
    +
    +
        static boolean isJoinToken(ASTNode node) {
          if ((node.getToken().getType() == HiveParser.TOK_JOIN)
        || (node.getToken().getType() == HiveParser.TOK_CROSSJOIN)
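
In effect materializeCTE() synthesizes the AST of CREATE TEMPORARY TABLE <cteName> AS <cte body> and runs a fresh analyzer over it; the only thing distinguishing it from a user CTAS is the marker smuggled in as the text of the KW_TEMPORARY token. A small sketch of that round-trip; MarkerDemo is hypothetical and 0 stands in for the real token type:

import org.antlr.runtime.ClassicToken;

public class MarkerDemo {
  static final String MATERIALIZATION_MARKER = "$MATERIALIZATION";

  public static void main(String[] args) {
    // producer side (materializeCTE): the keyword token carries the marker
    ClassicToken temporary = new ClassicToken(0, MATERIALIZATION_MARKER);
    // consumer side (the KW_TEMPORARY branch further down): recognize the
    // marker and flag the CreateTableDesc as a materialization
    boolean isMaterialization = MATERIALIZATION_MARKER.equals(temporary.getText());
    System.out.println(isMaterialization); // true
  }
}
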
    @@ -1538,141 +1716,217 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          }
        }

    - private void getMetaData(QBExpr qbexpr, ReadEntity parentInput)
    - throws SemanticException {
    + public void getMaterializationMetadata(QB qb) throws SemanticException {
    + try {
    + gatherCTEReferences(qb, rootClause);
    + int threshold = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_CTE_MATERIALIZE_THRESHOLD);
    + for (CTEClause cte : Sets.newHashSet(aliasToCTEs.values())) {
    + if (threshold >= 0 && cte.reference >= threshold) {
    + cte.materialize = true;
    + }
    + }
    + } catch (HiveException e) {
    + // Has to use full name to make sure it does not conflict with
    + // org.apache.commons.lang.StringUtils
    + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    + if (e instanceof SemanticException) {
    + throw (SemanticException)e;
    + }
    + throw new SemanticException(e.getMessage(), e);
    + }
    + }
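
The materialization decision is a single comparison against hive.optimize.cte.materialize.threshold: -1, as in the new cte_mat_1.q test, disables materialization entirely, while a non-negative threshold materializes every CTE referenced at least that many times (cte_3.q sets it to 1, so any referenced CTE is materialized). A sketch mirroring the loop's predicate; ThresholdRule is hypothetical:

public class ThresholdRule {
  // Mirrors the check above: materialize iff the threshold is enabled (>= 0)
  // and the CTE's reference count reaches it.
  static boolean shouldMaterialize(int threshold, int referenceCount) {
    return threshold >= 0 && referenceCount >= threshold;
  }

  public static void main(String[] args) {
    System.out.println(shouldMaterialize(-1, 5)); // false: disabled, as in cte_mat_1.q
    System.out.println(shouldMaterialize(1, 1));  // true: cte_3.q's setting
    System.out.println(shouldMaterialize(2, 1));  // false: referenced too rarely
  }
}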
    +
    + private void gatherCTEReferences(QBExpr qbexpr, CTEClause parent) throws HiveException {
          if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) {
    - getMetaData(qbexpr.getQB(), parentInput);
    + gatherCTEReferences(qbexpr.getQB(), parent);
          } else {
    - getMetaData(qbexpr.getQBExpr1(), parentInput);
    - getMetaData(qbexpr.getQBExpr2(), parentInput);
    + gatherCTEReferences(qbexpr.getQBExpr1(), parent);
    + gatherCTEReferences(qbexpr.getQBExpr2(), parent);
          }
        }

    - public Table getTable(TableScanOperator ts) {
    - return topToTable.get(ts);
    + // TODO: check view references, too
    + private void gatherCTEReferences(QB qb, CTEClause current) throws HiveException {
    + for (String alias : qb.getTabAliases()) {
    + String tabName = qb.getTabNameForAlias(alias);
    + String cteName = tabName.toLowerCase();
    +
    + CTEClause cte = findCTEFromName(qb, cteName);
    + if (cte != null) {
    + if (ctesExpanded.contains(cteName)) {
    + throw new SemanticException("Recursive cte " + cteName +
    + " detected (cycle: " + StringUtils.join(ctesExpanded, " -> ") +
    + " -> " + cteName + ").");
    + }
    + cte.reference++;
    + current.parents.add(cte);
    + if (cte.qbExpr != null) {
    + continue;
    + }
    + cte.qbExpr = new QBExpr(cteName);
    + doPhase1QBExpr(cte.cteNode, cte.qbExpr, qb.getId(), cteName);
    +
    + ctesExpanded.add(cteName);
    + gatherCTEReferences(cte.qbExpr, cte);
    + ctesExpanded.remove(ctesExpanded.size() - 1);
    + }
    + }
    + for (String alias : qb.getSubqAliases()) {
    + gatherCTEReferences(qb.getSubqForAlias(alias), current);
    + }
        }

        public void getMetaData(QB qb) throws SemanticException {
    - getMetaData(qb, null);
    + getMetaData(qb, false);
        }

    - @SuppressWarnings("nls")
    - public void getMetaData(QB qb, ReadEntity parentInput) throws SemanticException {
    + public void getMetaData(QB qb, boolean enableMaterialization) throws SemanticException {
          try {
    + if (enableMaterialization) {
    + getMaterializationMetadata(qb);
    + }
    + getMetaData(qb, null);
    + } catch (HiveException e) {
    + // Has to use full name to make sure it does not conflict with
    + // org.apache.commons.lang.StringUtils
    + LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    + if (e instanceof SemanticException) {
    + throw (SemanticException)e;
    + }
    + throw new SemanticException(e.getMessage(), e);
    + }
    + }

    - LOG.info("Get metadata for source tables");
    + private void getMetaData(QBExpr qbexpr, ReadEntity parentInput)
    + throws HiveException {
    + if (qbexpr.getOpcode() == QBExpr.Opcode.NULLOP) {
    + getMetaData(qbexpr.getQB(), parentInput);
    + } else {
    + getMetaData(qbexpr.getQBExpr1(), parentInput);
    + getMetaData(qbexpr.getQBExpr2(), parentInput);
    + }
    + }

    - // Go over the tables and populate the related structures.
    - // We have to materialize the table alias list since we might
    - // modify it in the middle for view rewrite.
    - List<String> tabAliases = new ArrayList<String>(qb.getTabAliases());
    + @SuppressWarnings("nls")
    + private void getMetaData(QB qb, ReadEntity parentInput)
    + throws HiveException {
    + LOG.info("Get metadata for source tables");

    - // Keep track of view alias to view name and read entity
    - // For eg: for a query like 'select * from V3', where V3 -> V2, V2 -> V1, V1 -> T
    - // keeps track of full view name and read entity corresponding to alias V3, V3:V2, V3:V2:V1.
    - // This is needed for tracking the dependencies for inputs, along with their parents.
    - Map<String, ObjectPair<String, ReadEntity>> aliasToViewInfo =
    - new HashMap<String, ObjectPair<String, ReadEntity>>();
    + // Go over the tables and populate the related structures.
    + // We have to materialize the table alias list since we might
    + // modify it in the middle for view rewrite.
    + List<String> tabAliases = new ArrayList<String>(qb.getTabAliases());

    - /*
    - * used to capture view to SQ conversions. This is used to check for
    - * recursive CTE invocations.
    - */
    - Map<String, String> sqAliasToCTEName = new HashMap<String, String>();
    + // Keep track of view alias to view name and read entity
    + // For eg: for a query like 'select * from V3', where V3 -> V2, V2 -> V1, V1 -> T
    + // keeps track of full view name and read entity corresponding to alias V3, V3:V2, V3:V2:V1.
    + // This is needed for tracking the dependencies for inputs, along with their parents.
    + Map<String, ObjectPair<String, ReadEntity>> aliasToViewInfo =
    + new HashMap<String, ObjectPair<String, ReadEntity>>();

    - for (String alias : tabAliases) {
    - String tab_name = qb.getTabNameForAlias(alias);
    + /*
    + * used to capture view to SQ conversions. This is used to check for
    + * recursive CTE invocations.
    + */
    + Map<String, String> sqAliasToCTEName = new HashMap<String, String>();

    - // we first look for this alias from CTE, and then from catalog.
    - /*
    - * if this s a CTE reference: Add its AST as a SubQuery to this QB.
    - */
    - ASTNode cteNode = findCTEFromName(qb, tab_name.toLowerCase());
    - if (cteNode != null) {
    - String cte_name = tab_name.toLowerCase();
    - if (ctesExpanded.contains(cte_name)) {
    - throw new SemanticException("Recursive cte " + tab_name + " detected (cycle: "
    - + StringUtils.join(ctesExpanded, " -> ") + " -> " + tab_name + ").");
    + for (String alias : tabAliases) {
    + String tabName = qb.getTabNameForAlias(alias);
    + String cteName = tabName.toLowerCase();
    +
    + Table tab = db.getTable(tabName, false);
    + if (tab == null ||
    + tab.getDbName().equals(SessionState.get().getCurrentDatabase())) {
    + Table materializedTab = ctx.getMaterializedTable(cteName);
    + if (materializedTab == null) {
    + // we first look for this alias from CTE, and then from catalog.
    + CTEClause cte = findCTEFromName(qb, cteName);
    + if (cte != null) {
    + if (!cte.materialize) {
    + addCTEAsSubQuery(qb, cteName, alias);
    + sqAliasToCTEName.put(alias, cteName);
    + continue;
    + }
    + tab = materializeCTE(cteName, cte);
                }
    - addCTEAsSubQuery(qb, cte_name, alias);
    - sqAliasToCTEName.put(alias, cte_name);
    - continue;
    + } else {
    + tab = materializedTab;
              }
    + }

    - Table tab = db.getTable(tab_name, false);
    - if (tab == null) {
    - ASTNode src = qb.getParseInfo().getSrcForAlias(alias);
    - if (null != src) {
    - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(src));
    - } else {
    - throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(alias));
    - }
    + if (tab == null) {
    + ASTNode src = qb.getParseInfo().getSrcForAlias(alias);
    + if (null != src) {
    + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(src));
    + } else {
    + throw new SemanticException(ErrorMsg.INVALID_TABLE.getMsg(alias));
              }
    + }

    - // Disallow INSERT INTO on bucketized tables
    - boolean isAcid = AcidUtils.isAcidTable(tab);
    - boolean isTableWrittenTo = qb.getParseInfo().isInsertIntoTable(tab.getDbName(), tab.getTableName());
    - if (isTableWrittenTo &&
    - tab.getNumBuckets() > 0 && !isAcid) {
    - throw new SemanticException(ErrorMsg.INSERT_INTO_BUCKETIZED_TABLE.
    - getMsg("Table: " + tab_name));
    - }
    - // Disallow update and delete on non-acid tables
    - if ((updating() || deleting()) && !isAcid && isTableWrittenTo) {
    - //isTableWrittenTo: delete from acidTbl where a in (select id from nonAcidTable)
    - //so only assert this if we are actually writing to this table
    - // Whether we are using an acid compliant transaction manager has already been caught in
    - // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid
    - // here, it means the table itself doesn't support it.
    - throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, tab_name);
    - }
    + // Disallow INSERT INTO on bucketized tables
    + boolean isAcid = AcidUtils.isAcidTable(tab);
    + boolean isTableWrittenTo = qb.getParseInfo().isInsertIntoTable(tab.getDbName(), tab.getTableName());
    + if (isTableWrittenTo &&
    + tab.getNumBuckets() > 0 && !isAcid) {
    + throw new SemanticException(ErrorMsg.INSERT_INTO_BUCKETIZED_TABLE.
    + getMsg("Table: " + tabName));
    + }
    + // Disallow update and delete on non-acid tables
    + if ((updating() || deleting()) && !isAcid && isTableWrittenTo) {
    + //isTableWrittenTo: delete from acidTbl where a in (select id from nonAcidTable)
    + //so only assert this if we are actually writing to this table
    + // Whether we are using an acid compliant transaction manager has already been caught in
    + // UpdateDeleteSemanticAnalyzer, so if we are updating or deleting and getting nonAcid
    + // here, it means the table itself doesn't support it.
    + throw new SemanticException(ErrorMsg.ACID_OP_ON_NONACID_TABLE, tabName);
    + }

    - if (tab.isView()) {
    - if (qb.getParseInfo().isAnalyzeCommand()) {
    - throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg());
    - }
    - String fullViewName = tab.getDbName() + "." + tab.getTableName();
    - // Prevent view cycles
    - if (viewsExpanded.contains(fullViewName)) {
    - throw new SemanticException("Recursive view " + fullViewName +
    - " detected (cycle: " + StringUtils.join(viewsExpanded, " -> ") +
    - " -> " + fullViewName + ").");
    - }
    - replaceViewReferenceWithDefinition(qb, tab, tab_name, alias);
    - // This is the last time we'll see the Table objects for views, so add it to the inputs
    - // now
    - ReadEntity viewInput = new ReadEntity(tab, parentInput);
    - viewInput = PlanUtils.addInput(inputs, viewInput);
    - aliasToViewInfo.put(alias, new ObjectPair<String, ReadEntity>(fullViewName, viewInput));
    - viewAliasToInput.put(getAliasId(alias, qb), viewInput);
    - continue;
    - }
    + if (tab.isView()) {
    + if (qb.getParseInfo().isAnalyzeCommand()) {
    + throw new SemanticException(ErrorMsg.ANALYZE_VIEW.getMsg());
    + }
    + String fullViewName = tab.getDbName() + "." + tab.getTableName();
    + // Prevent view cycles
    + if (viewsExpanded.contains(fullViewName)) {
    + throw new SemanticException("Recursive view " + fullViewName +
    + " detected (cycle: " + StringUtils.join(viewsExpanded, " -> ") +
    + " -> " + fullViewName + ").");
    + }
    + replaceViewReferenceWithDefinition(qb, tab, tabName, alias);
    + // This is the last time we'll see the Table objects for views, so add it to the inputs
    + // now
    + ReadEntity viewInput = new ReadEntity(tab, parentInput);
    + viewInput = PlanUtils.addInput(inputs, viewInput);
    + aliasToViewInfo.put(alias, new ObjectPair<String, ReadEntity>(fullViewName, viewInput));
    + viewAliasToInput.put(getAliasId(alias, qb), viewInput);
    + continue;
    + }

    - if (!InputFormat.class.isAssignableFrom(tab.getInputFormatClass())) {
    - throw new SemanticException(generateErrorMessage(
    - qb.getParseInfo().getSrcForAlias(alias),
    - ErrorMsg.INVALID_INPUT_FORMAT_TYPE.getMsg()));
    - }
    + if (!InputFormat.class.isAssignableFrom(tab.getInputFormatClass())) {
    + throw new SemanticException(generateErrorMessage(
    + qb.getParseInfo().getSrcForAlias(alias),
    + ErrorMsg.INVALID_INPUT_FORMAT_TYPE.getMsg()));
    + }

    - qb.getMetaData().setSrcForAlias(alias, tab);
    + qb.getMetaData().setSrcForAlias(alias, tab);

    - if (qb.getParseInfo().isAnalyzeCommand()) {
    - // allow partial partition specification for nonscan since noscan is fast.
    - TableSpec ts = new TableSpec(db, conf, (ASTNode) ast.getChild(0), true, this.noscan);
    - if (ts.specType == SpecType.DYNAMIC_PARTITION) { // dynamic partitions
    - try {
    - ts.partitions = db.getPartitionsByNames(ts.tableHandle, ts.partSpec);
    - } catch (HiveException e) {
    - throw new SemanticException(generateErrorMessage(
    - qb.getParseInfo().getSrcForAlias(alias),
    - "Cannot get partitions for " + ts.partSpec), e);
    - }
    + if (qb.getParseInfo().isAnalyzeCommand()) {
    + // allow partial partition specification for nonscan since noscan is fast.
    + TableSpec ts = new TableSpec(db, conf, (ASTNode) ast.getChild(0), true, this.noscan);
    + if (ts.specType == SpecType.DYNAMIC_PARTITION) { // dynamic partitions
    + try {
    + ts.partitions = db.getPartitionsByNames(ts.tableHandle, ts.partSpec);
    + } catch (HiveException e) {
    + throw new SemanticException(generateErrorMessage(
    + qb.getParseInfo().getSrcForAlias(alias),
    + "Cannot get partitions for " + ts.partSpec), e);
                }
    - // validate partial scan command
    - QBParseInfo qbpi = qb.getParseInfo();
    - if (qbpi.isPartialScanAnalyzeCommand()) {
    - Class<? extends InputFormat> inputFormatClass = null;
    - switch (ts.specType) {
    + }
    + // validate partial scan command
    + QBParseInfo qbpi = qb.getParseInfo();
    + if (qbpi.isPartialScanAnalyzeCommand()) {
    + Class<? extends InputFormat> inputFormatClass = null;
    + switch (ts.specType) {
                  case TABLE_ONLY:
                  case DYNAMIC_PARTITION:
                    inputFormatClass = ts.tableHandle.getInputFormatClass();
    @@ -1682,55 +1936,55 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
                    break;
                  default:
                    assert false;
    - }
    - // throw a HiveException for formats other than rcfile or orcfile.
    - if (!(inputFormatClass.equals(RCFileInputFormat.class) || inputFormatClass
    - .equals(OrcInputFormat.class))) {
    - throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_NON_RCFILE.getMsg());
    - }
                }
    -
    - tab.setTableSpec(ts);
    - qb.getParseInfo().addTableSpec(alias, ts);
    + // throw a HiveException for formats other than rcfile or orcfile.
    + if (!(inputFormatClass.equals(RCFileInputFormat.class) || inputFormatClass
    + .equals(OrcInputFormat.class))) {
    + throw new SemanticException(ErrorMsg.ANALYZE_TABLE_PARTIALSCAN_NON_RCFILE.getMsg());
    + }
              }

    - ReadEntity parentViewInfo = PlanUtils.getParentViewInfo(getAliasId(alias, qb), viewAliasToInput);
    - PlanUtils.addInput(inputs,
    - new ReadEntity(tab, parentViewInfo, parentViewInfo == null));
    + tab.setTableSpec(ts);
    + qb.getParseInfo().addTableSpec(alias, ts);
            }

    - LOG.info("Get metadata for subqueries");
    - // Go over the subqueries and getMetaData for these
    - for (String alias : qb.getSubqAliases()) {
    - boolean wasView = aliasToViewInfo.containsKey(alias);
    - boolean wasCTE = sqAliasToCTEName.containsKey(alias);
    - ReadEntity newParentInput = null;
    - if (wasView) {
    - viewsExpanded.add(aliasToViewInfo.get(alias).getFirst());
    - newParentInput = aliasToViewInfo.get(alias).getSecond();
    - } else if (wasCTE) {
    - ctesExpanded.add(sqAliasToCTEName.get(alias));
    - }
    - QBExpr qbexpr = qb.getSubqForAlias(alias);
    - getMetaData(qbexpr, newParentInput);
    - if (wasView) {
    - viewsExpanded.remove(viewsExpanded.size() - 1);
    - } else if (wasCTE) {
    - ctesExpanded.remove(ctesExpanded.size() - 1);
    - }
    + ReadEntity parentViewInfo = PlanUtils.getParentViewInfo(getAliasId(alias, qb), viewAliasToInput);
    + PlanUtils.addInput(inputs,
    + new ReadEntity(tab, parentViewInfo, parentViewInfo == null));
    + }
    +
    + LOG.info("Get metadata for subqueries");
    + // Go over the subqueries and getMetaData for these
    + for (String alias : qb.getSubqAliases()) {
    + boolean wasView = aliasToViewInfo.containsKey(alias);
    + boolean wasCTE = sqAliasToCTEName.containsKey(alias);
    + ReadEntity newParentInput = null;
    + if (wasView) {
    + viewsExpanded.add(aliasToViewInfo.get(alias).getFirst());
    + newParentInput = aliasToViewInfo.get(alias).getSecond();
    + } else if (wasCTE) {
    + ctesExpanded.add(sqAliasToCTEName.get(alias));
    + }
    + QBExpr qbexpr = qb.getSubqForAlias(alias);
    + getMetaData(qbexpr, newParentInput);
    + if (wasView) {
    + viewsExpanded.remove(viewsExpanded.size() - 1);
    + } else if (wasCTE) {
    + ctesExpanded.remove(ctesExpanded.size() - 1);
            }
    + }

    - RowFormatParams rowFormatParams = new RowFormatParams();
    - StorageFormat storageFormat = new StorageFormat(conf);
    + RowFormatParams rowFormatParams = new RowFormatParams();
    + StorageFormat storageFormat = new StorageFormat(conf);

    - LOG.info("Get metadata for destination tables");
    - // Go over all the destination structures and populate the related
    - // metadata
    - QBParseInfo qbp = qb.getParseInfo();
    + LOG.info("Get metadata for destination tables");
    + // Go over all the destination structures and populate the related
    + // metadata
    + QBParseInfo qbp = qb.getParseInfo();

    - for (String name : qbp.getClauseNamesForDest()) {
    - ASTNode ast = qbp.getDestForClause(name);
    - switch (ast.getToken().getType()) {
    + for (String name : qbp.getClauseNamesForDest()) {
    + ASTNode ast = qbp.getDestForClause(name);
    + switch (ast.getToken().getType()) {
              case HiveParser.TOK_TAB: {
                TableSpec ts = new TableSpec(db, conf, ast);
                if (ts.tableHandle.isView()) {
    @@ -1853,13 +2107,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
              default:
                throw new SemanticException(generateErrorMessage(ast,
                    "Unknown Token Type " + ast.getToken().getType()));
    - }
            }
    - } catch (HiveException e) {
    - // Has to use full name to make sure it does not conflict with
    - // org.apache.commons.lang.StringUtils
    - LOG.error(org.apache.hadoop.util.StringUtils.stringifyException(e));
    - throw new SemanticException(e.getMessage(), e);
          }
        }

    @@ -6188,6 +6436,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          Table dest_tab = null; // destination table if any
          boolean destTableIsAcid = false; // should the destination table be written to using ACID
          boolean destTableIsTemporary = false;
    + boolean destTableIsMaterialization = false;
          Partition dest_part = null;// destination partition if any
          Path queryTmpdir = null; // the intermediate destination directory
          Path dest_path = null; // the final destination directory
    @@ -6447,6 +6696,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
            if (tblDesc != null) {
              field_schemas = new ArrayList<FieldSchema>();
              destTableIsTemporary = tblDesc.isTemporary();
    + destTableIsMaterialization = tblDesc.isMaterialization();
            }

            boolean first = true;
    @@ -6604,6 +6854,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          }

          fileSinkDesc.setTemporary(destTableIsTemporary);
    + fileSinkDesc.setMaterialization(destTableIsMaterialization);

          /* Set List Bucketing context. */
          if (lbCtx != null) {
    @@ -6621,7 +6872,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          // can be changed in the optimizer but the key should not be changed
          // it should be the same as the MoveWork's sourceDir.
          fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString());
    - if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
    + if (!destTableIsMaterialization &&
    + HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) {
            String statsTmpLoc = ctx.getExtTmpPathRelTo(queryTmpdir).toString();
            fileSinkDesc.setStatsTmpDir(statsTmpLoc);
            LOG.debug("Set stats collection dir : " + statsTmpLoc);
    @@ -10036,7 +10288,8 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          LOG.info("Completed phase 1 of Semantic Analysis");

          // 5. Resolve Parse Tree
    - getMetaData(qb);
    + // Materialization is allowed if it is not a view definition
    + getMetaData(qb, createVwDesc == null);
          LOG.info("Completed getting MetaData in Semantic Analysis");

          plannerCtx.setParseTreeAttr(child, ctx_1);
    @@ -10746,6 +10999,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          boolean ifNotExists = false;
          boolean isExt = false;
          boolean isTemporary = false;
    + boolean isMaterialization = false;
          ASTNode selectStmt = null;
          final int CREATE_TABLE = 0; // regular CREATE TABLE
          final int CTLT = 1; // CREATE TABLE LIKE ... (CTLT)
    @@ -10785,6 +11039,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
              break;
            case HiveParser.KW_TEMPORARY:
              isTemporary = true;
    + isMaterialization = MATERIALIZATION_MARKER.equals(child.getText());
              break;
            case HiveParser.TOK_LIKETABLE:
              if (child.getChildCount() > 0) {
    @@ -10974,17 +11229,19 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
          case CTAS: // create table as select

            if (isTemporary) {
    - String dbName = qualifiedTabName[0];
    - String tblName = qualifiedTabName[1];
    - SessionState ss = SessionState.get();
    - if (ss == null) {
    - throw new SemanticException("No current SessionState, cannot create temporary table "
    - + dbName + "." + tblName);
    - }
    - Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(dbName);
    - if (tables != null && tables.containsKey(tblName) && !ctx.getExplain()) {
    - throw new SemanticException("Temporary table " + dbName + "." + tblName
    - + " already exists");
    + if (!ctx.getExplain() && !isMaterialization) {
    + String dbName = qualifiedTabName[0];
    + String tblName = qualifiedTabName[1];
    + SessionState ss = SessionState.get();
    + if (ss == null) {
    + throw new SemanticException("No current SessionState, cannot create temporary table "
    + + dbName + "." + tblName);
    + }
    + Map<String, Table> tables = SessionHiveMetaStoreClient.getTempTablesForDatabase(dbName);
    + if (tables != null && tables.containsKey(tblName)) {
    + throw new SemanticException("Temporary table " + dbName + "." + tblName
    + + " already exists");
    + }
              }
            } else {
              // Verify that the table does not already exist
    @@ -11033,6 +11290,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer {
                storageFormat.getOutputFormat(), location, storageFormat.getSerde(),
                storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
                skewedColNames, skewedValues);
    + tableDesc.setMaterialization(isMaterialization);
            tableDesc.setStoredAsSubDirectories(storedAsDirs);
            tableDesc.setNullFormat(rowFormatParams.nullFormat);
            qb.setTableDesc(tableDesc);

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
    index 89897d7..fc555ca 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
    @@ -228,7 +228,7 @@ public abstract class TaskCompiler {

          decideExecMode(rootTasks, ctx, globalLimitCtx);

    - if (pCtx.getQueryProperties().isCTAS()) {
    + if (pCtx.getQueryProperties().isCTAS() && !pCtx.getCreateTable().isMaterialization()) {
            // generate a DDL task and make it a dependent task of the leaf
            CreateTableDesc crtTblDesc = pCtx.getCreateTable();


    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
    index 2c6bb21..8b2ac3b 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
    @@ -24,23 +24,30 @@ import java.util.Iterator;
      import java.util.List;
      import java.util.Map;

    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
    +import org.apache.hadoop.fs.Path;
      import org.apache.hadoop.hive.conf.HiveConf;
    +import org.apache.hadoop.hive.metastore.TableType;
      import org.apache.hadoop.hive.metastore.api.FieldSchema;
      import org.apache.hadoop.hive.metastore.api.Order;
      import org.apache.hadoop.hive.ql.ErrorMsg;
    +import org.apache.hadoop.hive.ql.exec.DDLTask;
      import org.apache.hadoop.hive.ql.exec.Utilities;
      import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils;
      import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
    +import org.apache.hadoop.hive.ql.metadata.HiveException;
    +import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
      import org.apache.hadoop.hive.ql.metadata.Table;
      import org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer;
      import org.apache.hadoop.hive.ql.parse.ParseUtils;
      import org.apache.hadoop.hive.ql.parse.SemanticException;
    +import org.apache.hadoop.hive.ql.plan.Explain.Level;
    +import org.apache.hadoop.hive.serde.serdeConstants;
    +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
      import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
      import org.apache.hadoop.mapred.OutputFormat;
    -import org.apache.hadoop.hive.ql.plan.Explain.Level;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;


      /**
    @@ -78,6 +85,7 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
        List<List<String>> skewedColValues;
        boolean isStoredAsSubDirectories = false;
        boolean isTemporary = false;
    + private boolean isMaterialization = false;
        private boolean replaceMode = false;

        public CreateTableDesc() {
    @@ -553,6 +561,21 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
        }

        /**
    + * @return whether this table is a materialization
    + */
    + @Explain(displayName = "isMaterialization", displayOnlyOnTrue = true)
    + public boolean isMaterialization() {
    + return isMaterialization;
    + }
    +
    + /**
    + * @param isMaterialization whether the table is a materialization.
    + */
    + public void setMaterialization(boolean isMaterialization) {
    + this.isMaterialization = isMaterialization;
    + }
    +
    + /**
         * @param replaceMode Determine if this CreateTable should behave like a replace-into alter instead
         */
        public void setReplaceMode(boolean replaceMode) {
    @@ -565,4 +588,170 @@ public class CreateTableDesc extends DDLDesc implements Serializable {
        public boolean getReplaceMode() {
          return replaceMode;
        }
    +
    + public Table toTable(HiveConf conf) throws HiveException {
    + String databaseName = getDatabaseName();
    + String tableName = getTableName();
    +
    + if (databaseName == null || tableName.contains(".")) {
    + String[] names = Utilities.getDbTableName(tableName);
    + databaseName = names[0];
    + tableName = names[1];
    + }
    +
    + Table tbl = new Table(databaseName, tableName);
    +
    + if (getTblProps() != null) {
    + tbl.getTTable().getParameters().putAll(getTblProps());
    + }
    +
    + if (getPartCols() != null) {
    + tbl.setPartCols(getPartCols());
    + }
    + if (getNumBuckets() != -1) {
    + tbl.setNumBuckets(getNumBuckets());
    + }
    +
    + if (getStorageHandler() != null) {
    + tbl.setProperty(
    + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE,
    + getStorageHandler());
    + }
    + HiveStorageHandler storageHandler = tbl.getStorageHandler();
    +
    + /*
    + * We use LazySimpleSerDe by default.
    + *
    + * If the user didn't specify a SerDe, and any of the columns are not simple
    + * types, we will have to use DynamicSerDe instead.
    + */
    + if (getSerName() == null) {
    + if (storageHandler == null) {
    + LOG.info("Default to LazySimpleSerDe for table " + tableName);
    + tbl.setSerializationLib(LazySimpleSerDe.class.getName());
    + } else {
    + String serDeClassName = storageHandler.getSerDeClass().getName();
    + LOG.info("Use StorageHandler-supplied " + serDeClassName
    + + " for table " + tableName);
    + tbl.setSerializationLib(serDeClassName);
    + }
    + } else {
    + // let's validate that the serde exists
    + DDLTask.validateSerDe(getSerName(), conf);
    + tbl.setSerializationLib(getSerName());
    + }
    +
    + if (getFieldDelim() != null) {
    + tbl.setSerdeParam(serdeConstants.FIELD_DELIM, getFieldDelim());
    + tbl.setSerdeParam(serdeConstants.SERIALIZATION_FORMAT, getFieldDelim());
    + }
    + if (getFieldEscape() != null) {
    + tbl.setSerdeParam(serdeConstants.ESCAPE_CHAR, getFieldEscape());
    + }
    +
    + if (getCollItemDelim() != null) {
    + tbl.setSerdeParam(serdeConstants.COLLECTION_DELIM, getCollItemDelim());
    + }
    + if (getMapKeyDelim() != null) {
    + tbl.setSerdeParam(serdeConstants.MAPKEY_DELIM, getMapKeyDelim());
    + }
    + if (getLineDelim() != null) {
    + tbl.setSerdeParam(serdeConstants.LINE_DELIM, getLineDelim());
    + }
    + if (getNullFormat() != null) {
    + tbl.setSerdeParam(serdeConstants.SERIALIZATION_NULL_FORMAT, getNullFormat());
    + }
    + if (getSerdeProps() != null) {
    + Iterator<Map.Entry<String, String>> iter = getSerdeProps().entrySet()
    + .iterator();
    + while (iter.hasNext()) {
    + Map.Entry<String, String> m = iter.next();
    + tbl.setSerdeParam(m.getKey(), m.getValue());
    + }
    + }
    +
    + if (getCols() != null) {
    + tbl.setFields(getCols());
    + }
    + if (getBucketCols() != null) {
    + tbl.setBucketCols(getBucketCols());
    + }
    + if (getSortCols() != null) {
    + tbl.setSortCols(getSortCols());
    + }
    + if (getComment() != null) {
    + tbl.setProperty("comment", getComment());
    + }
    + if (getLocation() != null) {
    + tbl.setDataLocation(new Path(getLocation()));
    + }
    +
    + if (getSkewedColNames() != null) {
    + tbl.setSkewedColNames(getSkewedColNames());
    + }
    + if (getSkewedColValues() != null) {
    + tbl.setSkewedColValues(getSkewedColValues());
    + }
    +
    + tbl.getTTable().setTemporary(isTemporary());
    +
    + tbl.setStoredAsSubDirectories(isStoredAsSubDirectories());
    +
    + tbl.setInputFormatClass(getInputFormat());
    + tbl.setOutputFormatClass(getOutputFormat());
    +
    + // only persist input/output format to metadata when it is explicitly specified.
    + // Otherwise, load lazily via StorageHandler at query time.
    + if (getInputFormat() != null && !getInputFormat().isEmpty()) {
    + tbl.getTTable().getSd().setInputFormat(tbl.getInputFormatClass().getName());
    + }
    + if (getOutputFormat() != null && !getOutputFormat().isEmpty()) {
    + tbl.getTTable().getSd().setOutputFormat(tbl.getOutputFormatClass().getName());
    + }
    +
    + if (!Utilities.isDefaultNameNode(conf) && DDLTask.doesTableNeedLocation(tbl)) {
    + // If location is specified - ensure that it is a full qualified name
    + DDLTask.makeLocationQualified(tbl.getDbName(), tbl.getTTable().getSd(), tableName, conf);
    + }
    +
    + if (isExternal()) {
    + tbl.setProperty("EXTERNAL", "TRUE");
    + tbl.setTableType(TableType.EXTERNAL_TABLE);
    + }
    +
    + // If the sorted columns are a superset of the bucketed columns, store this
    + // fact. It can later be used to optimize some group-by queries. Note that
    + // the order does not matter, as long as the bucketed columns appear in the
    + // first 'n' sort columns, where 'n' is the number of bucketed columns.
    + if ((tbl.getBucketCols() != null) && (tbl.getSortCols() != null)) {
    + List<String> bucketCols = tbl.getBucketCols();
    + List<Order> sortCols = tbl.getSortCols();
    +
    + if ((sortCols.size() > 0) && (sortCols.size() >= bucketCols.size())) {
    + boolean found = true;
    +
    + Iterator<String> iterBucketCols = bucketCols.iterator();
    + while (iterBucketCols.hasNext()) {
    + String bucketCol = iterBucketCols.next();
    + boolean colFound = false;
    + for (int i = 0; i < bucketCols.size(); i++) {
    + if (bucketCol.equals(sortCols.get(i).getCol())) {
    + colFound = true;
    + break;
    + }
    + }
    + if (colFound == false) {
    + found = false;
    + break;
    + }
    + }
    + if (found) {
    + tbl.setProperty("SORTBUCKETCOLSPREFIX", "TRUE");
    + }
    + }
    + }
    + return tbl;
    + }
    +
      }

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    index cc462be..07fd2dc 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java
    @@ -51,6 +51,7 @@ public class FileSinkDesc extends AbstractOperatorDesc {
        private String compressType;
        private boolean multiFileSpray;
        private boolean temporary;
    + private boolean materialization;
        // Whether the files output by this FileSink can be merged, e.g. if they are to be put into a
        // bucketed or sorted table/partition they cannot be merged.
        private boolean canBeMerged;
    @@ -241,6 +242,14 @@ public class FileSinkDesc extends AbstractOperatorDesc {
          this.temporary = temporary;
        }

    + public boolean isMaterialization() {
    + return materialization;
    + }
    +
    + public void setMaterialization(boolean materialization) {
    + this.materialization = materialization;
    + }
    +

        public boolean canBeMerged() {
          return canBeMerged;

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
    index ea506fc..9d139ba 100644
    --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
    +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java
    @@ -18,12 +18,16 @@

      package org.apache.hadoop.hive.ql.stats;

    -import com.google.common.base.Joiner;
    -import com.google.common.collect.Lists;
    -import com.google.common.math.LongMath;
    +import java.math.BigDecimal;
    +import java.math.BigInteger;
    +import java.util.ArrayList;
    +import java.util.Collections;
    +import java.util.HashSet;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.Map.Entry;
    +import java.util.Set;

    -import org.slf4j.Logger;
    -import org.slf4j.LoggerFactory;
      import org.apache.hadoop.conf.Configuration;
      import org.apache.hadoop.fs.FileSystem;
      import org.apache.hadoop.fs.Path;
    @@ -44,7 +48,6 @@ import org.apache.hadoop.hive.ql.metadata.Hive;
      import org.apache.hadoop.hive.ql.metadata.HiveException;
      import org.apache.hadoop.hive.ql.metadata.Partition;
      import org.apache.hadoop.hive.ql.metadata.Table;
    -import org.apache.hadoop.hive.ql.optimizer.stats.annotation.StatsRulesProcFactory;
      import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
      import org.apache.hadoop.hive.ql.parse.SemanticException;
      import org.apache.hadoop.hive.ql.plan.ColStatistics;
    @@ -93,16 +96,12 @@ import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo;
      import org.apache.hadoop.io.BytesWritable;
      import org.apache.hive.common.util.AnnotationUtils;
      import org.apache.tez.mapreduce.hadoop.MRJobConfig;
    +import org.slf4j.Logger;
    +import org.slf4j.LoggerFactory;

    -import java.math.BigDecimal;
    -import java.math.BigInteger;
    -import java.util.ArrayList;
    -import java.util.Collections;
    -import java.util.HashSet;
    -import java.util.List;
    -import java.util.Map;
    -import java.util.Map.Entry;
    -import java.util.Set;
    +import com.google.common.base.Joiner;
    +import com.google.common.collect.Lists;
    +import com.google.common.math.LongMath;

      public class StatsUtils {

    @@ -745,6 +744,10 @@ public class StatsUtils {
         */
        public static List<ColStatistics> getTableColumnStats(
            Table table, List<ColumnInfo> schema, List<String> neededColumns) {
    + if (table.isMaterializedTable()) {
    + LOG.debug("Materialized table does not contain table statistics");
    + return null;
    + }
          String dbName = table.getDbName();
          String tabName = table.getTableName();
          List<String> neededColsInTable = processNeededColumns(schema, neededColumns);
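
Materialized CTE tables exist only for the lifetime of a query and never publish column statistics to the metastore, so getTableColumnStats() now returns null for them and the caller has to treat column statistics as unavailable. A minimal sketch of the short-circuit, assuming the setMaterializedTable setter this patch adds to Table:

import java.util.Collections;

import org.apache.hadoop.hive.ql.exec.ColumnInfo;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.stats.StatsUtils;

public class MaterializedStatsDemo {
  public static void main(String[] args) {
    Table t = new Table("default", "tmp_cte");
    t.setMaterializedTable(true); // setter referenced elsewhere in this patch
    // prints null: the method returns before ever consulting the metastore
    System.out.println(StatsUtils.getTableColumnStats(
        t, Collections.<ColumnInfo>emptyList(), Collections.<String>emptyList()));
  }
}
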
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java
    index be51edc..fc42aaa 100644
    --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java
    +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestGenTezWork.java
    @@ -29,6 +29,7 @@ import java.util.LinkedHashMap;

      import org.apache.hadoop.hive.conf.HiveConf;
      import org.apache.hadoop.hive.ql.CompilationOpContext;
    +import org.apache.hadoop.hive.ql.Context;
      import org.apache.hadoop.hive.ql.exec.FileSinkOperator;
      import org.apache.hadoop.hive.ql.exec.Operator;
      import org.apache.hadoop.hive.ql.exec.ReduceSinkOperator;
    @@ -42,6 +43,7 @@ import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
      import org.apache.hadoop.hive.ql.plan.ReduceWork;
      import org.apache.hadoop.hive.ql.plan.TableScanDesc;
      import org.apache.hadoop.hive.ql.plan.TezWork;
    +import org.apache.hadoop.hive.ql.session.SessionState;
      import org.junit.After;
      import org.junit.Before;
      import org.junit.Test;
    @@ -64,9 +66,17 @@ public class TestGenTezWork {
        @SuppressWarnings("unchecked")
        @Before
        public void setUp() throws Exception {
    + // Init conf
    + final HiveConf conf = new HiveConf(SemanticAnalyzer.class);
    + SessionState.start(conf);
    +
    + // Init parse context
    + final ParseContext pctx = new ParseContext();
    + pctx.setContext(new Context(conf));
    +
          ctx = new GenTezProcContext(
    - new HiveConf(),
    - new ParseContext(),
    + conf,
    + pctx,
              Collections.EMPTY_LIST,
              new ArrayList<Task<? extends Serializable>>(),
              Collections.EMPTY_SET,

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_3.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_3.q b/ql/src/test/queries/clientpositive/cte_3.q
    new file mode 100644
    index 0000000..aa8adbc
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_3.q
    @@ -0,0 +1,31 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=1;
    +
    +explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +;
    +
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +;
    +
    +-- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a;
    +
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a;
    +
    +-- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a;
    +
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a;

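Note: cte_3.q pins hive.optimize.cte.materialize.threshold=1, so a CTE that is referenced even once is materialized into an intermediate table before the main query runs; the golden files later in this message accordingly show a Stage-1 that writes default.q1. As a sketch only (not the optimizer's actual internal rewrite), the first query in cte_3.q behaves roughly like:

    -- materialize the CTE body first...
    create table q1 as select key from src where key = '5';
    -- ...then run the main query against the materialized result
    select * from q1;
    drop table q1;
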
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_4.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_4.q b/ql/src/test/queries/clientpositive/cte_4.q
    new file mode 100644
    index 0000000..0455a6a
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_4.q
    @@ -0,0 +1,56 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=1;
    +
    +-- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +;
    +
    +-- insert test
    +create table s1 like src;
    +with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +;
    +select * from s1;
    +drop table s1;
    +
    +-- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +;
    +
    +-- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +;
    +
    +select * from s2;
    +drop table s2;
    +
    +-- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +;
    +
    +select * from v1;
    +
    +drop view v1;
    +
    +
    +-- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +;
    +
    +with q1 as ( select key from src where key = '4')
    +select * from v1
    +;
    +
    +drop view v1;

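Note: the final block of cte_4.q checks name resolution rather than materialization itself. The view v1 is defined with its own q1 CTE (key = '5'), so the outer WITH clause must not leak into the view expansion:

    -- the CTE named q1 here is invisible inside v1's expansion:
    with q1 as (select key from src where key = '4')
    select * from v1;   -- expected output below: rows with key '5', not '4'
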
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_5.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_5.q b/ql/src/test/queries/clientpositive/cte_5.q
    new file mode 100644
    index 0000000..8968688
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_5.q
    @@ -0,0 +1,23 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=-1;
    +
    +create database mydb;
    +use mydb;
    +create table q1 (colnum int, colstring string);
    +insert into q1 values (5, 'A');
    +
    +use default;
    +
    +show tables in mydb;
    +show tables;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key;
    +
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key;

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_mat_1.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_mat_1.q b/ql/src/test/queries/clientpositive/cte_mat_1.q
    new file mode 100644
    index 0000000..2afb960
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_mat_1.q
    @@ -0,0 +1,8 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=-1;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;

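Note: cte_mat_1.q sets the threshold to -1, which disables CTE materialization entirely, so the CTE is inlined at each reference. The expected MR plan further below (cte_mat_1.q.out) therefore scans src twice and joins, with no intermediate default.q1 table; roughly, the query is planned as if written:

    -- with materialization disabled, each CTE reference is expanded in place:
    select a.key
    from (select * from src where key = '5') a
    join (select * from src where key = '5') b
    on a.key = b.key;
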
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_mat_2.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_mat_2.q b/ql/src/test/queries/clientpositive/cte_mat_2.q
    new file mode 100644
    index 0000000..adcd087
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_mat_2.q
    @@ -0,0 +1,8 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=3;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;

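Note: cte_mat_2.q raises the threshold to 3 while q1 is referenced only twice, so the reference count stays below the threshold and the CTE is again inlined; cte_mat_2.q.out below is identical to cte_mat_1.q.out. In short:

    set hive.optimize.cte.materialize.threshold=3;
    -- q1 appears twice; 2 < 3, so the CTE is inlined, not materialized
    with q1 as (select * from src where key = '5')
    select a.key from q1 a join q1 b on a.key = b.key;
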
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_mat_3.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_mat_3.q b/ql/src/test/queries/clientpositive/cte_mat_3.q
    new file mode 100644
    index 0000000..650cc24
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_mat_3.q
    @@ -0,0 +1,8 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=2;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;

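Note: here the two references to q1 meet the threshold of 2, so materialization kicks in. The expected plan in cte_mat_3.q.out below first writes the CTE result to default.q1 (Stage-1 plus the conditional move stages), then runs the self-join against the materialized table in Stage-8. A hand-written equivalent, as a sketch:

    create table q1 as select * from src where key = '5';  -- Stage-1: materialize
    select a.key from q1 a join q1 b on a.key = b.key;     -- Stage-8: self-join
    drop table q1;                                         -- cleanup after the query
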
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_mat_4.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_mat_4.q b/ql/src/test/queries/clientpositive/cte_mat_4.q
    new file mode 100644
    index 0000000..2d75963
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_mat_4.q
    @@ -0,0 +1,39 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=2;
    +
    +create temporary table q1 (a int, b string);
    +insert into q1 values (1, 'A');
    +
    +show tables;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;
    +
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;
    +
    +show tables;
    +
    +select * from q1;
    +
    +drop table q1;
    +
    +show tables;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;
    +
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key;
    +
    +show tables;

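Note: cte_mat_4.q guards against name collisions with an existing temporary table. Inside the WITH query the CTE q1 shadows the temporary table of the same name, and per the expected output below the materialization step must not clobber the user's table; the same statements are then repeated after the drop to cover the non-colliding path:

    create temporary table q1 (a int, b string);
    insert into q1 values (1, 'A');
    -- the CTE below shadows the temporary table for the duration of the query...
    with q1 as (select * from src where key = '5')
    select a.key from q1 a join q1 b on a.key = b.key;
    -- ...and the user's table is untouched afterwards:
    select * from q1;   -- expected: 1  A
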
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/queries/clientpositive/cte_mat_5.q
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/queries/clientpositive/cte_mat_5.q b/ql/src/test/queries/clientpositive/cte_mat_5.q
    new file mode 100644
    index 0000000..fd0aeda
    --- /dev/null
    +++ b/ql/src/test/queries/clientpositive/cte_mat_5.q
    @@ -0,0 +1,23 @@
    +set hive.mapred.mode=nonstrict;
    +set hive.optimize.cte.materialize.threshold=1;
    +
    +create database mydb;
    +use mydb;
    +create table q1 (colnum int, colstring string);
    +insert into q1 values (5, 'A');
    +
    +use default;
    +
    +show tables in mydb;
    +show tables;
    +
    +explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key;
    +
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key;

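Note: cte_mat_5.q exercises database qualification with materialization enabled (threshold=1). An unqualified q1 in the FROM clause resolves to the CTE, while the qualified mydb.q1 bypasses CTE resolution and reads the real table created earlier in the script:

    with q1 as (select * from src where key = '5')
    select a.colnum
    from mydb.q1 as a   -- qualified: the table in mydb
    join q1 as b        -- unqualified: the CTE
    on a.colnum = b.key;
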
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientnegative/analyze1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/analyze1.q.out b/ql/src/test/results/clientnegative/analyze1.q.out
    index 589a6ee..3230b02 100644
    --- a/ql/src/test/results/clientnegative/analyze1.q.out
    +++ b/ql/src/test/results/clientnegative/analyze1.q.out
    @@ -1 +1 @@
    -FAILED: SemanticException Partition spec {key=null} contains non-partition columns
    +FAILED: ValidationFailureSemanticException Partition spec {key=null} contains non-partition columns

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientnegative/dyn_part1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientnegative/dyn_part1.q.out b/ql/src/test/results/clientnegative/dyn_part1.q.out
    index 62d01fb..4c8e171 100644
    --- a/ql/src/test/results/clientnegative/dyn_part1.q.out
    +++ b/ql/src/test/results/clientnegative/dyn_part1.q.out
    @@ -6,4 +6,4 @@ POSTHOOK: query: create table dynamic_partition (key string) partitioned by (val
      POSTHOOK: type: CREATETABLE
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@dynamic_partition
    -FAILED: SemanticException Partition spec {hr=null} contains non-partition columns
    +FAILED: ValidationFailureSemanticException Partition spec {hr=null} contains non-partition columns

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/alter_view_as_select.q.out b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    index c89c0dc..3666221 100644
    --- a/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    +++ b/ql/src/test/results/clientpositive/alter_view_as_select.q.out
    @@ -57,7 +57,6 @@ POSTHOOK: type: CREATEVIEW
      POSTHOOK: Input: default@src
      POSTHOOK: Output: database:tv
      POSTHOOK: Output: tv@testView
    -POSTHOOK: Output: tv@testview
      PREHOOK: query: DESCRIBE FORMATTED tv.testView
      PREHOOK: type: DESCTABLE
      PREHOOK: Input: tv@testview
    @@ -106,7 +105,6 @@ POSTHOOK: type: CREATEVIEW
      POSTHOOK: Input: default@src
      POSTHOOK: Output: database:tv
      POSTHOOK: Output: tv@testView
    -POSTHOOK: Output: tv@testview
      PREHOOK: query: DESCRIBE FORMATTED tv.testView
      PREHOOK: type: DESCTABLE
      PREHOOK: Input: tv@testview

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_3.q.out b/ql/src/test/results/clientpositive/cte_3.q.out
    new file mode 100644
    index 0000000..0fe0865
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_3.q.out
    @@ -0,0 +1,444 @@
    +PREHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-7 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: q1
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-7 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: q1
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-8 depends on stages: Stage-0
    + Stage-13 depends on stages: Stage-8 , consists of Stage-10, Stage-9, Stage-11
    + Stage-10
    + Stage-7 depends on stages: Stage-10, Stage-9, Stage-12
    + Stage-9
    + Stage-11
    + Stage-12 depends on stages: Stage-11
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-14 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q2
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-8
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: q2
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-13
    + Conditional Operator
    +
    + Stage: Stage-10
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-9
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-11
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-12
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q2
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q2
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-14
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: q1
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@q2
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@q2
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +5
    +5
    +5

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_4.q.out b/ql/src/test/results/clientpositive/cte_4.q.out
    new file mode 100644
    index 0000000..d560d74
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_4.q.out
    @@ -0,0 +1,219 @@
    +PREHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@q2
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@q2
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +4 val_4
    +PREHOOK: query: -- insert test
    +create table s1 like src
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: -- insert test
    +create table s1 like src
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: Lineage: s1.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ]
    +POSTHOOK: Lineage: s1.value SIMPLE [(q1)q1.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: select * from s1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: drop table s1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s1
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: drop table s1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s1
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +PREHOOK: type: CREATETABLE_AS_SELECT
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +POSTHOOK: type: CREATETABLE_AS_SELECT
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@s2
    +#### A masked pattern was here ####
    +PREHOOK: query: select * from s2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +4
    +PREHOOK: query: drop table s2
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s2
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: drop table s2
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s2
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_5.q.out b/ql/src/test/results/clientpositive/cte_5.q.out
    new file mode 100644
    index 0000000..44a3282
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_5.q.out
    @@ -0,0 +1,156 @@
    +PREHOOK: query: create database mydb
    +PREHOOK: type: CREATEDATABASE
    +PREHOOK: Output: database:mydb
    +POSTHOOK: query: create database mydb
    +POSTHOOK: type: CREATEDATABASE
    +POSTHOOK: Output: database:mydb
    +PREHOOK: query: use mydb
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: use mydb
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:mydb
    +PREHOOK: query: create table q1 (colnum int, colstring string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:mydb
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: create table q1 (colnum int, colstring string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:mydb
    +POSTHOOK: Output: mydb@q1
    +PREHOOK: query: insert into q1 values (5, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: mydb@values__tmp__table__1
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: insert into q1 values (5, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: mydb@values__tmp__table__1
    +POSTHOOK: Output: mydb@q1
    +POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: use default
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:default
    +POSTHOOK: query: use default
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:default
    +PREHOOK: query: show tables in mydb
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: show tables in mydb
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:mydb
    +q1
    +values__tmp__table__1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: colnum is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: colnum (type: int)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: UDFToDouble(_col0) (type: double)
    + sort order: +
    + Map-reduce partition columns: UDFToDouble(_col0) (type: double)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + value expressions: _col0 (type: int)
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: 5.0 (type: double)
    + sort order: +
    + Map-reduce partition columns: 5.0 (type: double)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 UDFToDouble(_col0) (type: double)
    + 1 UDFToDouble('5') (type: double)
    + outputColumnNames: _col0
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: mydb@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: mydb@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_mat_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_mat_1.q.out b/ql/src/test/results/clientpositive/cte_mat_1.q.out
    new file mode 100644
    index 0000000..6429470
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_mat_1.q.out
    @@ -0,0 +1,72 @@
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 '5' (type: string)
    + 1 '5' (type: string)
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_mat_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_mat_2.q.out b/ql/src/test/results/clientpositive/cte_mat_2.q.out
    new file mode 100644
    index 0000000..6429470
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_mat_2.q.out
    @@ -0,0 +1,72 @@
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 '5' (type: string)
    + 1 '5' (type: string)
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_mat_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_mat_3.q.out b/ql/src/test/results/clientpositive/cte_mat_3.q.out
    new file mode 100644
    index 0000000..683228c
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_mat_3.q.out
    @@ -0,0 +1,147 @@
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-8 depends on stages: Stage-0
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-7 depends on stages: Stage-8
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-8
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_mat_4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_mat_4.q.out b/ql/src/test/results/clientpositive/cte_mat_4.q.out
    new file mode 100644
    index 0000000..048e4b6
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_mat_4.q.out
    @@ -0,0 +1,477 @@
    +PREHOOK: query: create temporary table q1 (a int, b string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +POSTHOOK: query: create temporary table q1 (a int, b string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +PREHOOK: query: insert into q1 values (1, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@values__tmp__table__1
    +PREHOOK: Output: default@q1
    +POSTHOOK: query: insert into q1 values (1, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@values__tmp__table__1
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Lineage: q1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +q1
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-8 depends on stages: Stage-0
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-7 depends on stages: Stage-8
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-8
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +q1
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
    +PREHOOK: query: select * from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +#### A masked pattern was here ####
    +1 A
    +PREHOOK: query: drop table q1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@q1
    +PREHOOK: Output: default@q1
    +POSTHOOK: query: drop table q1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Output: default@q1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-8 depends on stages: Stage-0
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-7 depends on stages: Stage-8
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-8
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
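
    A note on what this golden file exercises: the first run of the CTE query
    happens while a physical default.q1 table (holding the single row "1 A")
    still exists, and the second run happens after that table is dropped. In
    both runs the PREHOOK/POSTHOOK Output lines list default@q1, because the
    CTE is materialized into a transient default.q1 for the duration of the
    statement; the materialized copy neither replaces the physical table's
    contents (select * from q1 still returns "1 A") nor survives the query
    (the final "show tables" lists no q1). A minimal sketch of the query
    shape, assuming the materialization threshold knob these cte_mat_* tests
    presumably set -- the exact value is outside this hunk:

        -- assumption: threshold low enough that a CTE referenced twice is materialized
        set hive.optimize.cte.materialize.threshold=2;
        with q1 as (select * from src where key = '5')
        select a.key
        from q1 a join q1 b
        on a.key = b.key;

    In the plan above, Stage-1 writes the filtered rows to default.q1,
    Stage-8 joins that materialized table with itself, and Stage-7 fetches
    the nine result rows.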

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/cte_mat_5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/cte_mat_5.q.out b/ql/src/test/results/clientpositive/cte_mat_5.q.out
    new file mode 100644
    index 0000000..b7b34ba
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/cte_mat_5.q.out
    @@ -0,0 +1,238 @@
    +PREHOOK: query: create database mydb
    +PREHOOK: type: CREATEDATABASE
    +PREHOOK: Output: database:mydb
    +POSTHOOK: query: create database mydb
    +POSTHOOK: type: CREATEDATABASE
    +POSTHOOK: Output: database:mydb
    +PREHOOK: query: use mydb
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: use mydb
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:mydb
    +PREHOOK: query: create table q1 (colnum int, colstring string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:mydb
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: create table q1 (colnum int, colstring string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:mydb
    +POSTHOOK: Output: mydb@q1
    +PREHOOK: query: insert into q1 values (5, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: mydb@values__tmp__table__1
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: insert into q1 values (5, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: mydb@values__tmp__table__1
    +POSTHOOK: Output: mydb@q1
    +POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: use default
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:default
    +POSTHOOK: query: use default
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:default
    +PREHOOK: query: show tables in mydb
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: show tables in mydb
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:mydb
    +q1
    +values__tmp__table__1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-6 depends on stages: Stage-1 , consists of Stage-3, Stage-2, Stage-4
    + Stage-3
    + Stage-0 depends on stages: Stage-3, Stage-2, Stage-5
    + Stage-8 depends on stages: Stage-0
    + Stage-2
    + Stage-4
    + Stage-5 depends on stages: Stage-4
    + Stage-7 depends on stages: Stage-8
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-6
    + Conditional Operator
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-8
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: colnum is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: colnum (type: int)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: UDFToDouble(_col0) (type: double)
    + sort order: +
    + Map-reduce partition columns: UDFToDouble(_col0) (type: double)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + value expressions: _col0 (type: int)
    + TableScan
    + alias: b
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: UDFToDouble(_col0) (type: double)
    + sort order: +
    + Map-reduce partition columns: UDFToDouble(_col0) (type: double)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Operator Tree:
    + Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 UDFToDouble(_col0) (type: double)
    + 1 UDFToDouble(_col0) (type: double)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-2
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-4
    + Map Reduce
    + Map Operator Tree:
    + TableScan
    + File Output Operator
    + compressed: false
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    +
    + Stage: Stage-5
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-7
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Input: mydb@q1
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: mydb@q1
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
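
    The point of cte_mat_5: within the query body an unqualified q1 resolves
    to the materialized CTE (written to default.q1 in the current database),
    while the qualified mydb.q1 keeps resolving to the physical table. Since
    a.colnum is an int and b.key a string, the planner wraps both join keys
    in UDFToDouble. A sketch of the resolution rule being tested:

        use default;
        with q1 as (select * from src where key = '5')
        select a.colnum
        from mydb.q1 as a    -- physical table: one row (5, 'A')
             join q1 as b    -- materialized CTE over src
        on a.colnum = b.key; -- int joined to string, so both sides cast to double
        -- returns 5 three times, once per src row with key '5'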

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_1.q.out b/ql/src/test/results/clientpositive/llap/cte_1.q.out
    new file mode 100644
    index 0000000..41ffdc7
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_1.q.out
    @@ -0,0 +1,126 @@
    +PREHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-0 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: src
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-0 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: src
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-0 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: src
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5
    +5
    +5
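
    With materialization not kicking in here (cte_1 presumably leaves
    hive.optimize.cte.materialize.threshold at its disabled default; the set
    command, if any, is outside this hunk), every CTE is simply inlined: even
    the chained q1-over-q2 case collapses into a single Fetch over src with
    the key = '5' filter pushed down, and no Tez DAG is launched. The shape
    to look for in the explain output:

        explain
        with q1 as (select key from q2 where key = '5'),
             q2 as (select key from src where key = '5')
        select * from (select key from q1) a;
        -- expected when inlined: STAGE DEPENDENCIES show only "Stage-0 is a root stage"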

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_2.q.out b/ql/src/test/results/clientpositive/llap/cte_2.q.out
    new file mode 100644
    index 0000000..23f8ec6
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_2.q.out
    @@ -0,0 +1,189 @@
    +PREHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +4 val_4
    +PREHOOK: query: -- insert test
    +create table s1 like src
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: -- insert test
    +create table s1 like src
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@s1
    +POSTHOOK: Lineage: s1.key SIMPLE []
    +POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    +PREHOOK: query: select * from s1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: drop table s1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s1
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: drop table s1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s1
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +PREHOOK: type: CREATETABLE_AS_SELECT
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +POSTHOOK: type: CREATETABLE_AS_SELECT
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: select * from s2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +4
    +PREHOOK: query: drop table s2
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s2
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: drop table s2
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s2
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
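
    The closing "name collision" case deserves a gloss: v1 was defined with
    its own CTE (key = '5'), so the outer CTE q1 (key = '4') does not leak
    into the view's expansion -- selecting from v1 still returns the three
    '5' rows. A CTE is scoped to the statement that declares it:

        create view v1 as
        with q1 as (select key from src where key = '5')
        select * from q1;

        with q1 as (select key from src where key = '4')
        select * from v1;  -- the view's own q1 wins: returns 5, 5, 5
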
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_3.q.out b/ql/src/test/results/clientpositive/llap/cte_3.q.out
    new file mode 100644
    index 0000000..37796c0
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_3.q.out
    @@ -0,0 +1,294 @@
    +PREHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-0 depends on stages: Stage-1
    + Stage-3 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: q1
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-0 depends on stages: Stage-1
    + Stage-3 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: q1
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-4 depends on stages: Stage-2, Stage-0
    + Stage-5 depends on stages: Stage-4
    + Stage-3 depends on stages: Stage-4
    + Stage-0 depends on stages: Stage-1
    + Stage-6 is a root stage
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q2
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-4
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 2
    + Map Operator Tree:
    + TableScan
    + alias: q2
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-5
    + Dependency Collection
    +
    + Stage: Stage-3
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-6
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + TableScan
    + alias: q1
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + ListSink
    +
    +PREHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@q2
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@q2
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +5
    +5
    +5
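
    cte_3 shows materialization respecting CTE-to-CTE dependencies: Stage-1
    writes default.q2 from src, Stage-4 scans the materialized q2 to write
    default.q1, and only then does Stage-6 fetch the result. A sketch, with
    the threshold value again an assumption:

        -- assumption: threshold=1, i.e. materialize every referenced CTE
        set hive.optimize.cte.materialize.threshold=1;
        with q1 as (select key from q2 where key = '5'),
             q2 as (select key from src where key = '5')
        select * from (select key from q1) a;
        -- materialization follows the dependency graph: q2 is written before q1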

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_4.q.out b/ql/src/test/results/clientpositive/llap/cte_4.q.out
    new file mode 100644
    index 0000000..d560d74
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_4.q.out
    @@ -0,0 +1,219 @@
    +PREHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@q2
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@q2
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +4 val_4
    +PREHOOK: query: -- insert test
    +create table s1 like src
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: -- insert test
    +create table s1 like src
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: Lineage: s1.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ]
    +POSTHOOK: Lineage: s1.value SIMPLE [(q1)q1.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: select * from s1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: drop table s1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s1
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: drop table s1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s1
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +PREHOOK: type: CREATETABLE_AS_SELECT
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +POSTHOOK: type: CREATETABLE_AS_SELECT
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@s2
    +#### A masked pattern was here ####
    +PREHOOK: query: select * from s2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +4
    +PREHOOK: query: drop table s2
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s2
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: drop table s2
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s2
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
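
    cte_4 reruns the cte_2 scenarios with materialization on: the union,
    insert-overwrite, from-style and CTAS statements now also list default@q1
    (and, for the union, default@q2) among their PREHOOK/POSTHOOK Inputs and
    Outputs, because the CTE is written out as a table first. Note the s1
    lineage now traces key and value back to (q1)q1 rather than to src:

        create table s2 as
        with q1 as (select key from src where key = '4')
        select * from q1;
        -- with materialization on, the CTAS both reads and writes default@q1
        -- in addition to creating default@s2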

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_5.q.out b/ql/src/test/results/clientpositive/llap/cte_5.q.out
    new file mode 100644
    index 0000000..3092398
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_5.q.out
    @@ -0,0 +1,168 @@
    +PREHOOK: query: create database mydb
    +PREHOOK: type: CREATEDATABASE
    +PREHOOK: Output: database:mydb
    +POSTHOOK: query: create database mydb
    +POSTHOOK: type: CREATEDATABASE
    +POSTHOOK: Output: database:mydb
    +PREHOOK: query: use mydb
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: use mydb
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:mydb
    +PREHOOK: query: create table q1 (colnum int, colstring string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:mydb
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: create table q1 (colnum int, colstring string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:mydb
    +POSTHOOK: Output: mydb@q1
    +PREHOOK: query: insert into q1 values (5, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: mydb@values__tmp__table__1
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: insert into q1 values (5, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: mydb@values__tmp__table__1
    +POSTHOOK: Output: mydb@q1
    +POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: use default
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:default
    +POSTHOOK: query: use default
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:default
    +PREHOOK: query: show tables in mydb
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: show tables in mydb
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:mydb
    +q1
    +values__tmp__table__1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: colnum is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: colnum (type: int)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: UDFToDouble(_col0) (type: double)
    + sort order: +
    + Map-reduce partition columns: UDFToDouble(_col0) (type: double)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + value expressions: _col0 (type: int)
    + Execution mode: llap
    + Map 3
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: 5.0 (type: double)
    + sort order: +
    + Map-reduce partition columns: 5.0 (type: double)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Execution mode: llap
    + Reducer 2
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 UDFToDouble(_col0) (type: double)
    + 1 5.0 (type: double)
    + outputColumnNames: _col0
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: mydb@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: mydb@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
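
    cte_5 is the non-materialized twin of cte_mat_5: the same cross-database
    query compiles to a single Tez DAG that joins mydb.q1 directly against
    the filtered src scan -- the only inputs are default@src and mydb@q1,
    there is no default@q1 output, and the constant key = '5' folds into the
    literal join key 5.0 on the src side:

        with q1 as (select * from src where key = '5')
        select a.colnum
        from mydb.q1 as a join q1 as b
        on a.colnum = b.key;
        -- inlined plan: Map 1 scans mydb.q1, Map 3 scans src,
        -- Reducer 2 merge-joins on UDFToDouble(colnum) = 5.0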

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
    new file mode 100644
    index 0000000..06135d7
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_mat_1.q.out
    @@ -0,0 +1,83 @@
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Execution mode: llap
    + Map 3
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Execution mode: llap
    + Reducer 2
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 '5' (type: string)
    + 1 '5' (type: string)
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
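
    Note that cte_mat_1 (and the byte-identical cte_mat_2 that follows) still
    compiles to a fully inlined plan -- two scans of src merge-joined on the
    constant '5', with no "name: default.q1" file sinks -- so these variants
    presumably set threshold values that keep materialization off (an
    assumption; the set commands are outside this hunk). The contrast to
    watch for against cte_mat_3 below:

        explain
        with q1 as (select * from src where key = '5')
        select a.key
        from q1 a join q1 b
        on a.key = b.key;
        -- here: a single root Tez stage and no CTE table writes;
        -- in cte_mat_3: extra stages that write and then re-scan default.q1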

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
    new file mode 100644
    index 0000000..06135d7
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_mat_2.q.out
    @@ -0,0 +1,83 @@
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-0 depends on stages: Stage-1
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Execution mode: llap
    + Map 3
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: '5' (type: string)
    + sort order: +
    + Map-reduce partition columns: '5' (type: string)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Execution mode: llap
    + Reducer 2
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 '5' (type: string)
    + 1 '5' (type: string)
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
    new file mode 100644
    index 0000000..cade4c9
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out
    @@ -0,0 +1,122 @@
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-4 depends on stages: Stage-2, Stage-0
    + Stage-0 depends on stages: Stage-1
    + Stage-3 depends on stages: Stage-4
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-4
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
    + Vertices:
    + Map 2
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Map 4
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Reducer 3
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +

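Here the CTE is materialized rather than inlined: Stage-1 writes the filtered rows into default.q1, Stage-0 moves the files into place, and only then does Stage-4 join two scans of the materialized table — which is why those scans show Num rows: 1 with PARTIAL basic stats, as the intermediate table carries no statistics. A hand-rolled equivalent, as a rough sketch only:

    -- manual materialization of the shared CTE
    create temporary table q1 as
      select * from src where key = '5';
    select a.key from q1 a join q1 b on a.key = b.key;
    drop table q1;
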
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
    new file mode 100644
    index 0000000..1a53b43
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out
    @@ -0,0 +1,429 @@
    +PREHOOK: query: create temporary table q1 (a int, b string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +POSTHOOK: query: create temporary table q1 (a int, b string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +PREHOOK: query: insert into q1 values (1, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@values__tmp__table__1
    +PREHOOK: Output: default@q1
    +POSTHOOK: query: insert into q1 values (1, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@values__tmp__table__1
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Lineage: q1.a EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.b SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +q1
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-4 depends on stages: Stage-2, Stage-0
    + Stage-0 depends on stages: Stage-1
    + Stage-3 depends on stages: Stage-4
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-4
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
    +#### A masked pattern was here ####
    + Vertices:
    + Map 2
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Map 4
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Reducer 3
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +q1
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
    +PREHOOK: query: select * from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +#### A masked pattern was here ####
    +1 A
    +PREHOOK: query: drop table q1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@q1
    +PREHOOK: Output: default@q1
    +POSTHOOK: query: drop table q1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Output: default@q1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-4 depends on stages: Stage-2, Stage-0
    + Stage-0 depends on stages: Stage-1
    + Stage-3 depends on stages: Stage-4
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-4
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
    +#### A masked pattern was here ####
    + Vertices:
    + Map 2
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Map 4
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: _col0 (type: string)
    + sort order: +
    + Map-reduce partition columns: _col0 (type: string)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Reducer 3
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 _col0 (type: string)
    + 1 _col0 (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.key
    +from q1 a join q1 b
    +on a.key=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +5
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +values__tmp__table__1

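cte_mat_4 checks that materializing a CTE named q1 does not clobber a pre-existing table of the same name: inside the query the CTE shadows the temporary table, but afterwards select * from q1 still returns the original row (1, A) and show tables is unchanged. In sketch form:

    -- the CTE shadows the table only for the duration of this query
    with q1 as (select * from src where key = '5')
    select a.key from q1 a join q1 b on a.key = b.key;
    select * from q1;  -- still the temporary table: 1  A
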
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
    new file mode 100644
    index 0000000..9fa3e7d
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out
    @@ -0,0 +1,214 @@
    +PREHOOK: query: create database mydb
    +PREHOOK: type: CREATEDATABASE
    +PREHOOK: Output: database:mydb
    +POSTHOOK: query: create database mydb
    +POSTHOOK: type: CREATEDATABASE
    +POSTHOOK: Output: database:mydb
    +PREHOOK: query: use mydb
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: use mydb
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:mydb
    +PREHOOK: query: create table q1 (colnum int, colstring string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:mydb
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: create table q1 (colnum int, colstring string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:mydb
    +POSTHOOK: Output: mydb@q1
    +PREHOOK: query: insert into q1 values (5, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: mydb@values__tmp__table__1
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: insert into q1 values (5, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: mydb@values__tmp__table__1
    +POSTHOOK: Output: mydb@q1
    +POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: use default
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:default
    +POSTHOOK: query: use default
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:default
    +PREHOOK: query: show tables in mydb
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: show tables in mydb
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:mydb
    +q1
    +values__tmp__table__1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +STAGE DEPENDENCIES:
    + Stage-1 is a root stage
    + Stage-2 depends on stages: Stage-1
    + Stage-4 depends on stages: Stage-2, Stage-0
    + Stage-0 depends on stages: Stage-1
    + Stage-3 depends on stages: Stage-4
    +
    +STAGE PLANS:
    + Stage: Stage-1
    + Tez
    +#### A masked pattern was here ####
    + Vertices:
    + Map 1
    + Map Operator Tree:
    + TableScan
    + alias: src
    + Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: (key = '5') (type: boolean)
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: '5' (type: string), value (type: string)
    + outputColumnNames: _col0, _col1
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    + name: default.q1
    + Execution mode: llap
    +
    + Stage: Stage-2
    + Dependency Collection
    +
    + Stage: Stage-4
    + Tez
    +#### A masked pattern was here ####
    + Edges:
    + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE)
    +#### A masked pattern was here ####
    + Vertices:
    + Map 2
    + Map Operator Tree:
    + TableScan
    + alias: a
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Filter Operator
    + predicate: colnum is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Select Operator
    + expressions: colnum (type: int)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + Reduce Output Operator
    + key expressions: UDFToDouble(_col0) (type: double)
    + sort order: +
    + Map-reduce partition columns: UDFToDouble(_col0) (type: double)
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + value expressions: _col0 (type: int)
    + Execution mode: llap
    + Map 4
    + Map Operator Tree:
    + TableScan
    + alias: b
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Filter Operator
    + predicate: key is not null (type: boolean)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Select Operator
    + expressions: key (type: string)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Reduce Output Operator
    + key expressions: UDFToDouble(_col0) (type: double)
    + sort order: +
    + Map-reduce partition columns: UDFToDouble(_col0) (type: double)
    + Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: NONE
    + Execution mode: llap
    + Reducer 3
    + Execution mode: llap
    + Reduce Operator Tree:
    + Merge Join Operator
    + condition map:
    + Inner Join 0 to 1
    + keys:
    + 0 UDFToDouble(_col0) (type: double)
    + 1 UDFToDouble(_col0) (type: double)
    + outputColumnNames: _col0
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + File Output Operator
    + compressed: false
    + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE
    + table:
    + input format: org.apache.hadoop.mapred.TextInputFormat
    + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
    + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
    +
    + Stage: Stage-0
    + Move Operator
    + files:
    + hdfs directory: true
    +#### A masked pattern was here ####
    +
    + Stage: Stage-3
    + Fetch Operator
    + limit: -1
    + Processor Tree:
    + ListSink
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Input: mydb@q1
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: mydb@q1
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5

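cte_mat_5 exercises name resolution across databases: the qualified mydb.q1 binds to the permanent table, while the bare q1 binds to the materialized CTE in default. Because the join compares an int column with a string column, the plan coerces both sides to double (the UDFToDouble keys in Map 2 and Map 4); spelled out explicitly, the join predicate is equivalent to this sketch:

    select a.colnum
    from mydb.q1 a join q1 b
    on cast(a.colnum as double) = cast(b.key as double);
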
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/llap/tez_union.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/llap/tez_union.q.out b/ql/src/test/results/clientpositive/llap/tez_union.q.out
    index 18e0046..b12bd6d 100644
    --- a/ql/src/test/results/clientpositive/llap/tez_union.q.out
    +++ b/ql/src/test/results/clientpositive/llap/tez_union.q.out
    @@ -1436,7 +1436,6 @@ POSTHOOK: Input: default@table1
      POSTHOOK: Input: default@table2
      POSTHOOK: Output: database:default
      POSTHOOK: Output: default@TABLE3
    -POSTHOOK: Output: default@table3
      PREHOOK: query: explain formatted select count(*) from TABLE3
      PREHOOK: type: QUERY
      POSTHOOK: query: explain formatted select count(*) from TABLE3

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_1.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/cte_1.q.out b/ql/src/test/results/clientpositive/tez/cte_1.q.out
    new file mode 100644
    index 0000000..1b24fb0
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/tez/cte_1.q.out
    @@ -0,0 +1,111 @@
    +PREHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Stage-0
    + Fetch Operator
    + limit:-1
    + Select Operator [SEL_2]
    + Output:["_col0"]
    + Filter Operator [FIL_4]
    + predicate:(key = '5')
    + TableScan [TS_0]
    + Output:["key"]
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Stage-0
    + Fetch Operator
    + limit:-1
    + Select Operator [SEL_2]
    + Output:["_col0"]
    + Filter Operator [FIL_4]
    + predicate:(key = '5')
    + TableScan [TS_0]
    + Output:["key"]
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Stage-0
    + Fetch Operator
    + limit:-1
    + Select Operator [SEL_2]
    + Output:["_col0"]
    + Filter Operator [FIL_4]
    + predicate:(key = '5')
    + TableScan [TS_0]
    + Output:["key"]
    +
    +PREHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5
    +5
    +5

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_2.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/cte_2.q.out b/ql/src/test/results/clientpositive/tez/cte_2.q.out
    new file mode 100644
    index 0000000..23f8ec6
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/tez/cte_2.q.out
    @@ -0,0 +1,189 @@
    +PREHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +4 val_4
    +PREHOOK: query: -- insert test
    +create table s1 like src
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: -- insert test
    +create table s1 like src
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: default@s1
    +POSTHOOK: Lineage: s1.key SIMPLE []
    +POSTHOOK: Lineage: s1.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ]
    +PREHOOK: query: select * from s1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: drop table s1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s1
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: drop table s1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s1
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +PREHOOK: type: CREATETABLE_AS_SELECT
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +POSTHOOK: type: CREATETABLE_AS_SELECT
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: select * from s2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +4
    +PREHOOK: query: drop table s2
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s2
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: drop table s2
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s2
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1

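The name-collision case at the end of cte_2 shows that a caller's WITH clause does not reach inside a view: the outer q1 selects key = '4', yet v1 keeps returning the '5' rows from the q1 baked into its own definition. The view effectively expands to its own CTE, roughly:

    -- what select * from v1 resolves to, regardless of the caller's q1
    with q1 as (select key from src where key = '5')
    select * from q1;
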
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_3.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/cte_3.q.out b/ql/src/test/results/clientpositive/tez/cte_3.q.out
    new file mode 100644
    index 0000000..fedbb7d
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/tez/cte_3.q.out
    @@ -0,0 +1,187 @@
    +PREHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Stage-2
    + Dependency Collection{}
    + Stage-1
    + Map 1
    + File Output Operator [FS_3]
    + table:{"name:":"default.q1"}
    + Select Operator [SEL_2] (rows=250 width=10)
    + Output:["_col0"]
    + Filter Operator [FIL_4] (rows=250 width=10)
    + predicate:(key = '5')
    + TableScan [TS_0] (rows=500 width=10)
    + default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
    +Stage-0
    + Move Operator
    + Please refer to the previous Stage-1
    +Stage-3
    + Fetch Operator
    + limit:-1
    + Select Operator [SEL_6]
    + Output:["_col0"]
    + TableScan [TS_5]
    + Output:["key"]
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select *
    +from q1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- in subquery
    +explain
    +with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Stage-2
    + Dependency Collection{}
    + Stage-1
    + Map 1
    + File Output Operator [FS_3]
    + table:{"name:":"default.q1"}
    + Select Operator [SEL_2] (rows=250 width=10)
    + Output:["_col0"]
    + Filter Operator [FIL_4] (rows=250 width=10)
    + predicate:(key = '5')
    + TableScan [TS_0] (rows=500 width=10)
    + default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
    +Stage-0
    + Move Operator
    + Please refer to the previous Stage-1
    +Stage-3
    + Fetch Operator
    + limit:-1
    + Select Operator [SEL_6]
    + Output:["_col0"]
    + TableScan [TS_5]
    + Output:["key"]
    +
    +PREHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +POSTHOOK: query: -- chaining
    +explain
    +with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Stage-5
    + Dependency Collection{}
    + Stage-4
    + Map 2
    + File Output Operator [FS_8]
    + table:{"name:":"default.q1"}
    + Select Operator [SEL_7] (rows=1 width=0)
    + Output:["_col0"]
    + Filter Operator [FIL_9] (rows=1 width=0)
    + predicate:(key = '5')
    + TableScan [TS_5] (rows=1 width=0)
    + default@q2,q2,Tbl:PARTIAL,Col:NONE,Output:["key"]
    + Stage-2
    + Dependency Collection{}
    + Stage-1
    + Map 1
    + File Output Operator [FS_3]
    + table:{"name:":"default.q2"}
    + Select Operator [SEL_2] (rows=250 width=10)
    + Output:["_col0"]
    + Filter Operator [FIL_4] (rows=250 width=10)
    + predicate:(key = '5')
    + TableScan [TS_0] (rows=500 width=10)
    + default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
    + Stage-0
    + Move Operator
    + Please refer to the previous Stage-1
    +Stage-3
    + Move Operator
    + Please refer to the previous Stage-4
    +Stage-6
    + Fetch Operator
    + limit:-1
    + Select Operator [SEL_11]
    + Output:["_col0"]
    + TableScan [TS_10]
    + Output:["key"]
    +
    +PREHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@q2
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from q2 where key = '5'),
    +q2 as ( select key from src where key = '5')
    +select * from (select key from q1) a
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@q2
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +5
    +5
    +5

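With chained CTEs the materialization happens in dependency order: Stage-1 writes default.q2 from src, then Stage-4 writes default.q1 from the freshly materialized q2 (again with rows=1 and PARTIAL estimates, since q2 carries no statistics), and the final fetch reads q1. Unrolled by hand, as a sketch:

    create temporary table q2 as select key from src where key = '5';
    create temporary table q1 as select key from q2 where key = '5';
    select * from (select key from q1) a;
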
    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_4.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/cte_4.q.out b/ql/src/test/results/clientpositive/tez/cte_4.q.out
    new file mode 100644
    index 0000000..d560d74
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/tez/cte_4.q.out
    @@ -0,0 +1,219 @@
    +PREHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@q2
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- union test
    +with q1 as (select * from src where key= '5'),
    +q2 as (select * from src s2 where key = '4')
    +select * from q1 union all select * from q2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@q2
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@q2
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +4 val_4
    +PREHOOK: query: -- insert test
    +create table s1 like src
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: -- insert test
    +create table s1 like src
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key, value from src where key = '5')
    +from q1
    +insert overwrite table s1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: Lineage: s1.key SIMPLE [(q1)q1.FieldSchema(name:key, type:string, comment:null), ]
    +POSTHOOK: Lineage: s1.value SIMPLE [(q1)q1.FieldSchema(name:value, type:string, comment:null), ]
    +PREHOOK: query: select * from s1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: drop table s1
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s1
    +PREHOOK: Output: default@s1
    +POSTHOOK: query: drop table s1
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s1
    +POSTHOOK: Output: default@s1
    +PREHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- from style
    +with q1 as (select * from src where key= '5')
    +from q1
    +select *
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +#### A masked pattern was here ####
    +5 val_5
    +5 val_5
    +5 val_5
    +PREHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +PREHOOK: type: CREATETABLE_AS_SELECT
    +PREHOOK: Input: default@q1
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@q1
    +PREHOOK: Output: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: -- ctas
    +create table s2 as
    +with q1 as ( select key from src where key = '4')
    +select * from q1
    +POSTHOOK: type: CREATETABLE_AS_SELECT
    +POSTHOOK: Input: default@q1
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@q1
    +POSTHOOK: Output: default@s2
    +#### A masked pattern was here ####
    +PREHOOK: query: select * from s2
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from s2
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@s2
    +#### A masked pattern was here ####
    +4
    +PREHOOK: query: drop table s2
    +PREHOOK: type: DROPTABLE
    +PREHOOK: Input: default@s2
    +PREHOOK: Output: default@s2
    +POSTHOOK: query: drop table s2
    +POSTHOOK: type: DROPTABLE
    +POSTHOOK: Input: default@s2
    +POSTHOOK: Output: default@s2
    +PREHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +PREHOOK: type: CREATEVIEW
    +PREHOOK: Input: default@src
    +PREHOOK: Output: database:default
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: -- view test, name collision
    +create view v1 as
    +with q1 as ( select key from src where key = '5')
    +select * from q1
    +POSTHOOK: type: CREATEVIEW
    +POSTHOOK: Input: default@src
    +POSTHOOK: Output: database:default
    +POSTHOOK: Output: default@v1
    +PREHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as ( select key from src where key = '4')
    +select * from v1
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: default@v1
    +#### A masked pattern was here ####
    +5
    +5
    +5
    +PREHOOK: query: drop view v1
    +PREHOOK: type: DROPVIEW
    +PREHOOK: Input: default@v1
    +PREHOOK: Output: default@v1
    +POSTHOOK: query: drop view v1
    +POSTHOOK: type: DROPVIEW
    +POSTHOOK: Input: default@v1
    +POSTHOOK: Output: default@v1

    http://git-wip-us.apache.org/repos/asf/hive/blob/dca4233d/ql/src/test/results/clientpositive/tez/cte_5.q.out
    ----------------------------------------------------------------------
    diff --git a/ql/src/test/results/clientpositive/tez/cte_5.q.out b/ql/src/test/results/clientpositive/tez/cte_5.q.out
    new file mode 100644
    index 0000000..579b4f3
    --- /dev/null
    +++ b/ql/src/test/results/clientpositive/tez/cte_5.q.out
    @@ -0,0 +1,127 @@
    +PREHOOK: query: create database mydb
    +PREHOOK: type: CREATEDATABASE
    +PREHOOK: Output: database:mydb
    +POSTHOOK: query: create database mydb
    +POSTHOOK: type: CREATEDATABASE
    +POSTHOOK: Output: database:mydb
    +PREHOOK: query: use mydb
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: use mydb
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:mydb
    +PREHOOK: query: create table q1 (colnum int, colstring string)
    +PREHOOK: type: CREATETABLE
    +PREHOOK: Output: database:mydb
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: create table q1 (colnum int, colstring string)
    +POSTHOOK: type: CREATETABLE
    +POSTHOOK: Output: database:mydb
    +POSTHOOK: Output: mydb@q1
    +PREHOOK: query: insert into q1 values (5, 'A')
    +PREHOOK: type: QUERY
    +PREHOOK: Input: mydb@values__tmp__table__1
    +PREHOOK: Output: mydb@q1
    +POSTHOOK: query: insert into q1 values (5, 'A')
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: mydb@values__tmp__table__1
    +POSTHOOK: Output: mydb@q1
    +POSTHOOK: Lineage: q1.colnum EXPRESSION [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col1, type:string, comment:), ]
    +POSTHOOK: Lineage: q1.colstring SIMPLE [(values__tmp__table__1)values__tmp__table__1.FieldSchema(name:tmp_values_col2, type:string, comment:), ]
    +PREHOOK: query: use default
    +PREHOOK: type: SWITCHDATABASE
    +PREHOOK: Input: database:default
    +POSTHOOK: query: use default
    +POSTHOOK: type: SWITCHDATABASE
    +POSTHOOK: Input: database:default
    +PREHOOK: query: show tables in mydb
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:mydb
    +POSTHOOK: query: show tables in mydb
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:mydb
    +q1
    +values__tmp__table__1
    +PREHOOK: query: show tables
    +PREHOOK: type: SHOWTABLES
    +PREHOOK: Input: database:default
    +POSTHOOK: query: show tables
    +POSTHOOK: type: SHOWTABLES
    +POSTHOOK: Input: database:default
    +alltypesorc
    +cbo_t1
    +cbo_t2
    +cbo_t3
    +lineitem
    +part
    +src
    +src1
    +src_cbo
    +src_json
    +src_sequencefile
    +src_thrift
    +srcbucket
    +srcbucket2
    +srcpart
    +PREHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +POSTHOOK: query: explain
    +with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +Plan optimized by CBO.
    +
    +Vertex dependency in root stage
    +Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 3 (SIMPLE_EDGE)
    +
    +Stage-0
    + Fetch Operator
    + limit:-1
    + Stage-1
    + Reducer 2
    + File Output Operator [FS_10]
    + Merge Join Operator [MERGEJOIN_15] (rows=275 width=10)
    + Conds:RS_6.UDFToDouble(_col0)=RS_7.5.0(Inner),Output:["_col0"]
    + <-Map 1 [SIMPLE_EDGE]
    + SHUFFLE [RS_6]
    + PartitionCols:UDFToDouble(_col0)
    + Select Operator [SEL_2] (rows=1 width=3)
    + Output:["_col0"]
    + Filter Operator [FIL_13] (rows=1 width=3)
    + predicate:colnum is not null
    + TableScan [TS_0] (rows=1 width=3)
    + mydb@q1,a,Tbl:COMPLETE,Col:NONE,Output:["colnum"]
    + <-Map 3 [SIMPLE_EDGE]
    + SHUFFLE [RS_7]
    + PartitionCols:5.0
    + Select Operator [SEL_5] (rows=250 width=10)
    + Filter Operator [FIL_14] (rows=250 width=10)
    + predicate:(key = '5')
    + TableScan [TS_3] (rows=500 width=10)
    + default@src,src,Tbl:COMPLETE,Col:NONE,Output:["key"]
    +
    +PREHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +PREHOOK: type: QUERY
    +PREHOOK: Input: default@src
    +PREHOOK: Input: mydb@q1
    +#### A masked pattern was here ####
    +POSTHOOK: query: with q1 as (select * from src where key= '5')
    +select a.colnum
    +from mydb.q1 as a join q1 as b
    +on a.colnum=b.key
    +POSTHOOK: type: QUERY
    +POSTHOOK: Input: default@src
    +POSTHOOK: Input: mydb@q1
    +#### A masked pattern was here ####
    +5
    +5
    +5
