Author: gunther
Date: Wed Jul 30 00:09:09 2014
New Revision: 1614525

URL: http://svn.apache.org/r1614525
Log:
HIVE-7840: Support Windowing Functions (Laljo John Pullokkaran via Gunther Hagleitner)

Modified:
     hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java
     hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
     hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q
     hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java?rev=1614525&r1=1614524&r2=1614525&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/translator/ASTConverter.java Wed Jul 30 00:09:09 2014
@@ -26,10 +26,14 @@ import org.eigenbase.rel.SortRel;
  import org.eigenbase.rel.TableAccessRelBase;
  import org.eigenbase.reltype.RelDataTypeField;
  import org.eigenbase.rex.RexCall;
+import org.eigenbase.rex.RexFieldCollation;
  import org.eigenbase.rex.RexInputRef;
  import org.eigenbase.rex.RexLiteral;
  import org.eigenbase.rex.RexNode;
+import org.eigenbase.rex.RexOver;
  import org.eigenbase.rex.RexVisitorImpl;
+import org.eigenbase.rex.RexWindow;
+import org.eigenbase.rex.RexWindowBound;
  import org.eigenbase.sql.SqlKind;
  import org.eigenbase.sql.SqlOperator;
  import org.eigenbase.sql.type.BasicSqlType;
@@ -49,7 +53,7 @@ public class ASTConverter {
    SortRel order;

    Schema schema;
-
+
    ASTConverter(RelNode root) {
      this.root = root;
      hiveAST = new HiveAST();
@@ -128,10 +132,10 @@ public class ASTConverter {
      hiveAST.select = b.node();

      /*
- * 7. Order
- * Use in Order By from the block above. RelNode has no pointer to parent
- * hence we need to go top down; but OB at each block really belong to its
- * src/from. Hence the need to pass in sortRel for each block from its parent.
+ * 7. Order Use in Order By from the block above. RelNode has no pointer to
+ * parent hence we need to go top down; but OB at each block really belong
+ * to its src/from. Hence the need to pass in sortRel for each block from
+ * its parent.
       */
      if (sortrel != null) {
        HiveSortRel hiveSort = (HiveSortRel) sortrel;
@@ -145,9 +149,9 @@ public class ASTConverter {
             * ASTNode on unqualified name.
             */
            ASTNode astCol = ASTBuilder.unqualifiedName(cI.column);
- ASTNode astNode = c.getDirection() == RelFieldCollation.Direction.ASCENDING
- ? ASTBuilder.createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC")
- : ASTBuilder.createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC");
+ ASTNode astNode = c.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder
+ .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder
+ .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC");
            astNode.addChild(astCol);
            orderAst.addChild(astNode);
          }
@@ -182,7 +186,8 @@ public class ASTConverter {
        QueryBlockInfo right = convertSource(join.getRight());
        s = new Schema(left.schema, right.schema);
        ASTNode cond = join.getCondition().accept(new RexVisitor(s));
- boolean semiJoin = ((join instanceof HiveJoinRel) && ((HiveJoinRel)join).isLeftSemiJoin()) ? true : false;
+ boolean semiJoin = ((join instanceof HiveJoinRel) && ((HiveJoinRel) join).isLeftSemiJoin()) ? true
+ : false;
        ast = ASTBuilder.join(left.ast, right.ast, join.getJoinType(), cond, semiJoin);
        if (semiJoin)
          s = left.schema;
@@ -264,6 +269,119 @@ public class ASTConverter {
        return ASTBuilder.literal(literal);
      }

+ private ASTNode getPSpecAST(RexWindow window) {
+ ASTNode pSpecAst = null;
+
+ ASTNode dByAst = null;
+ if (window.partitionKeys != null && !window.partitionKeys.isEmpty()) {
+ dByAst = ASTBuilder.createAST(HiveParser.TOK_DISTRIBUTEBY, "TOK_DISTRIBUTEBY");
+ for (RexNode pk : window.partitionKeys) {
+ ASTNode astCol = pk.accept(this);
+ dByAst.addChild(astCol);
+ }
+ }
+
+ ASTNode oByAst = null;
+ if (window.orderKeys != null && !window.orderKeys.isEmpty()) {
+ oByAst = ASTBuilder.createAST(HiveParser.TOK_ORDERBY, "TOK_ORDERBY");
+ for (RexFieldCollation ok : window.orderKeys) {
+ ASTNode astNode = ok.getDirection() == RelFieldCollation.Direction.ASCENDING ? ASTBuilder
+ .createAST(HiveParser.TOK_TABSORTCOLNAMEASC, "TOK_TABSORTCOLNAMEASC") : ASTBuilder
+ .createAST(HiveParser.TOK_TABSORTCOLNAMEDESC, "TOK_TABSORTCOLNAMEDESC");
+ ASTNode astCol = ok.left.accept(this);
+ astNode.addChild(astCol);
+ oByAst.addChild(astNode);
+ }
+ }
+
+ if (dByAst != null || oByAst != null) {
+ pSpecAst = ASTBuilder.createAST(HiveParser.TOK_PARTITIONINGSPEC, "TOK_PARTITIONINGSPEC");
+ if (dByAst != null)
+ pSpecAst.addChild(dByAst);
+ if (oByAst != null)
+ pSpecAst.addChild(oByAst);
+ }
+
+ return pSpecAst;
+ }
+
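For reference, getPSpecAST turns the window's PARTITION BY keys into a TOK_DISTRIBUTEBY subtree and its ORDER BY keys into a TOK_ORDERBY subtree, then wraps whichever of the two exist under TOK_PARTITIONINGSPEC. A minimal stand-alone sketch of the same shape, using a hypothetical Node class in place of Hive's ASTNode/ASTBuilder (ASC assumed for brevity):

    import java.util.ArrayList;
    import java.util.List;

    public class PSpecSketch {
      // Stand-in for Hive's ASTNode: a labeled n-ary tree.
      static class Node {
        final String label;
        final List<Node> children = new ArrayList<>();
        Node(String label) { this.label = label; }
        Node add(Node c) { children.add(c); return this; }
        @Override public String toString() {
          if (children.isEmpty()) return label;
          StringBuilder sb = new StringBuilder("(").append(label);
          for (Node c : children) sb.append(' ').append(c);
          return sb.append(')').toString();
        }
      }

      // Mirrors getPSpecAST: returns null when there is neither a
      // PARTITION BY nor an ORDER BY.
      static Node pSpec(List<String> partKeys, List<String> orderKeys) {
        Node dBy = null;
        if (partKeys != null && !partKeys.isEmpty()) {
          dBy = new Node("TOK_DISTRIBUTEBY");
          for (String k : partKeys) dBy.add(new Node(k));
        }
        Node oBy = null;
        if (orderKeys != null && !orderKeys.isEmpty()) {
          oBy = new Node("TOK_ORDERBY");
          for (String k : orderKeys)
            oBy.add(new Node("TOK_TABSORTCOLNAMEASC").add(new Node(k)));
        }
        if (dBy == null && oBy == null) return null;
        Node spec = new Node("TOK_PARTITIONINGSPEC");
        if (dBy != null) spec.add(dBy);
        if (oBy != null) spec.add(oBy);
        return spec;
      }

      public static void main(String[] args) {
        // For OVER (PARTITION BY key ORDER BY value), prints (wrapped here):
        // (TOK_PARTITIONINGSPEC (TOK_DISTRIBUTEBY key)
        //     (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC value)))
        System.out.println(pSpec(List.of("key"), List.of("value")));
      }
    }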
+ private ASTNode getWindowBound(RexWindowBound wb) {
+ ASTNode wbAST = null;
+
+ if (wb.isCurrentRow()) {
+ wbAST = ASTBuilder.createAST(HiveParser.KW_CURRENT, "CURRENT");
+ } else {
+ if (wb.isPreceding())
+ wbAST = ASTBuilder.createAST(HiveParser.KW_PRECEDING, "PRECEDING");
+ else
+ wbAST = ASTBuilder.createAST(HiveParser.KW_FOLLOWING, "FOLLOWING");
+ if (wb.isUnbounded()) {
+ wbAST.addChild(ASTBuilder.createAST(HiveParser.KW_UNBOUNDED, "UNBOUNDED"));
+ } else {
+ ASTNode offset = wb.getOffset().accept(this);
+ wbAST.addChild(offset);
+ }
+ }
+
+ return wbAST;
+ }
+
+ private ASTNode getWindowRangeAST(RexWindow window) {
+ ASTNode wRangeAst = null;
+
+ ASTNode startAST = null;
+ RexWindowBound ub = (RexWindowBound)window.getUpperBound();
+ if (ub != null) {
+ startAST = getWindowBound(ub);
+ }
+
+ ASTNode endAST = null;
+ RexWindowBound lb = (RexWindowBound)window.getLowerBound();
+ if (lb != null) {
+ endAST = getWindowBound(lb);
+ }
+
+ if (startAST != null || endAST != null) {
+ //NOTE: in Hive AST Rows->Range(Physical) & Range -> Values (logical)
+ if (window.isRows())
+ wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWRANGE, "TOK_WINDOWRANGE");
+ else
+ wRangeAst = ASTBuilder.createAST(HiveParser.TOK_WINDOWVALUES, "TOK_WINDOWVALUES");
+ if (startAST != null)
+ wRangeAst.addChild(startAST);
+ if (endAST != null)
+ wRangeAst.addChild(endAST);
+ }
+
+ return wRangeAst;
+ }
+
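getWindowBound and getWindowRangeAST rebuild the frame clause. Note the naming crossover the NOTE above calls out: Hive's TOK_WINDOWRANGE token carries the physical (ROWS) frame, while TOK_WINDOWVALUES carries the logical (RANGE) frame. A stand-alone sketch of the mapping, with hypothetical Dir/BoundSketch names standing in for RexWindowBound and the AST types:

    public class BoundSketch {
      enum Dir { CURRENT, PRECEDING, FOLLOWING }

      // Mirrors getWindowBound: CURRENT ROW carries no amount; otherwise
      // the direction node carries UNBOUNDED or a literal offset.
      // A null offset stands in for RexWindowBound.isUnbounded().
      static String bound(Dir dir, Integer offset) {
        if (dir == Dir.CURRENT) return "(CURRENT)";
        String amt = (offset == null) ? "UNBOUNDED" : offset.toString();
        return "(" + dir + " " + amt + ")";
      }

      // Mirrors getWindowRangeAST: ROWS -> TOK_WINDOWRANGE (physical),
      // RANGE -> TOK_WINDOWVALUES (logical).
      static String frame(boolean isRows, String start, String end) {
        String tok = isRows ? "TOK_WINDOWRANGE" : "TOK_WINDOWVALUES";
        return "(" + tok + " " + start + " " + end + ")";
      }

      public static void main(String[] args) {
        // ROWS BETWEEN 1 PRECEDING AND CURRENT ROW prints:
        // (TOK_WINDOWRANGE (PRECEDING 1) (CURRENT))
        System.out.println(
            frame(true, bound(Dir.PRECEDING, 1), bound(Dir.CURRENT, null)));
      }
    }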
+ @Override
+ public ASTNode visitOver(RexOver over) {
+ if (!deep) {
+ return null;
+ }
+
+ // 1. Translate the UDAF
+ final ASTNode wUDAFAst = visitCall(over);
+
+ // 2. Add TOK_WINDOW as child of UDAF
+ ASTNode wSpec = ASTBuilder.createAST(HiveParser.TOK_WINDOWSPEC, "TOK_WINDOWSPEC");
+ wUDAFAst.addChild(wSpec);
+
+ // 3. Add Part Spec & Range Spec as child of TOK_WINDOW
+ final RexWindow window = over.getWindow();
+ final ASTNode wPSpecAst = getPSpecAST(window);
+ final ASTNode wRangeAst = getWindowRangeAST(window);
+ if (wPSpecAst != null)
+ wSpec.addChild(wPSpecAst);
+ if (wRangeAst != null)
+ wSpec.addChild(wRangeAst);
+
+
+ return wUDAFAst;
+ }
+
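Putting the pieces together: visitOver renders the aggregate call itself via visitCall, then appends a TOK_WINDOWSPEC child holding the partitioning spec and the frame. For count(c_int) OVER (PARTITION BY key ORDER BY value ROWS BETWEEN 1 PRECEDING AND CURRENT ROW), the resulting tree should look roughly like this (shape inferred from the code above, not a captured AST dump):

    (TOK_FUNCTION count (TOK_TABLE_OR_COL c_int)
      (TOK_WINDOWSPEC
        (TOK_PARTITIONINGSPEC
          (TOK_DISTRIBUTEBY (TOK_TABLE_OR_COL key))
          (TOK_ORDERBY (TOK_TABSORTCOLNAMEASC (TOK_TABLE_OR_COL value))))
        (TOK_WINDOWRANGE (PRECEDING 1) (CURRENT))))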
      @Override
      public ASTNode visitCall(RexCall call) {
        if (!deep) {

Modified: hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java?rev=1614525&r1=1614524&r2=1614525&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (original)
+++ hive/branches/cbo/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java Wed Jul 30 00:09:09 2014
@@ -252,10 +252,21 @@ import org.eigenbase.reltype.RelDataType
  import org.eigenbase.rex.RexBuilder;
  import org.eigenbase.rex.RexInputRef;
  import org.eigenbase.rex.RexNode;
+import org.eigenbase.rex.RexWindowBound;
+import org.eigenbase.rex.RexFieldCollation;
+import org.eigenbase.sql.SqlAggFunction;
+import org.eigenbase.sql.SqlWindow;
+import org.eigenbase.sql.parser.SqlParserPos;
+import org.eigenbase.sql.type.SqlTypeName;
+import org.eigenbase.sql.SqlCall;
+import org.eigenbase.sql.SqlKind;
+import org.eigenbase.sql.SqlNode;
+import org.eigenbase.sql.SqlLiteral;
  import org.eigenbase.util.CompositeList;

  import com.google.common.base.Function;
  import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableList.Builder;
  import com.google.common.collect.ImmutableMap;
  import com.google.common.collect.Iterables;
  import com.google.common.collect.Lists;
@@ -11762,7 +11773,6 @@ public class SemanticAnalyzer extends Ba
          && !queryProperties.hasClusterBy()
          && !queryProperties.hasDistributeBy()
          && !queryProperties.hasSortBy()
- && !queryProperties.hasWindowing()
          && !queryProperties.hasPTF()
          && !queryProperties.usesScript()
          && !queryProperties.hasMultiDestQuery()
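Dropping the hasWindowing() clause from this conjunction is what admits windowed queries into the CBO path in the first place; the rest of the eligibility guard is unchanged.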
@@ -12628,6 +12638,162 @@ public class SemanticAnalyzer extends Ba
        return sortRel;
      }

+ List<RexNode> getPartitionKeys(PartitionSpec ps, RexNodeConverter converter, RowResolver inputRR)
+ throws SemanticException {
+ List<RexNode> pKeys = new ArrayList<RexNode>();
+ if (ps != null) {
+ List<PartitionExpression> pExprs = ps.getExpressions();
+ for (PartitionExpression pExpr : pExprs) {
+ TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
+ tcCtx.setAllowStatefulFunctions(true);
+ ExprNodeDesc exp = genExprNodeDesc(pExpr.getExpression(), inputRR, tcCtx);
+ pKeys.add(converter.convert(exp));
+ }
+ }
+
+ return pKeys;
+ }
+
+ List<RexFieldCollation> getOrderKeys(OrderSpec os, RexNodeConverter converter, RowResolver inputRR) throws SemanticException {
+ List<RexFieldCollation> oKeys = new ArrayList<RexFieldCollation>();
+ if (os != null) {
+ List<OrderExpression> oExprs = os.getExpressions();
+ for (OrderExpression oExpr : oExprs) {
+ TypeCheckCtx tcCtx = new TypeCheckCtx(inputRR);
+ tcCtx.setAllowStatefulFunctions(true);
+ ExprNodeDesc exp = genExprNodeDesc(oExpr.getExpression(), inputRR, tcCtx);
+ RexNode ordExp = converter.convert(exp);
+ Set<SqlKind> flags = new HashSet<SqlKind>();
+ if (oExpr.getOrder() == org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.Order.DESC)
+ flags.add(SqlKind.DESCENDING);
+ oKeys.add(new RexFieldCollation(ordExp, flags));
+ }
+ }
+
+ return oKeys;
+ }
+
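getPartitionKeys and getOrderKeys run each PARTITION BY / ORDER BY expression through Hive's type checker (with stateful functions allowed) and hand the resulting ExprNodeDesc to RexNodeConverter. Sort direction survives only as a flag set on the collation: an empty set means ascending, SqlKind.DESCENDING means descending. A stand-alone sketch of that convention, with hypothetical stand-ins for RexFieldCollation and SqlKind:

    import java.util.HashSet;
    import java.util.Set;

    public class OrderKeySketch {
      enum Flag { DESCENDING }  // stand-in for SqlKind.DESCENDING

      // Stand-in for RexFieldCollation: an expression plus a flag set.
      record FieldCollation(String expr, Set<Flag> flags) {
        boolean descending() { return flags.contains(Flag.DESCENDING); }
      }

      // Mirrors getOrderKeys: flags start empty (= ascending) and gain
      // DESCENDING only when the Hive OrderExpression says DESC.
      static FieldCollation orderKey(String expr, boolean desc) {
        Set<Flag> flags = new HashSet<>();
        if (desc) flags.add(Flag.DESCENDING);
        return new FieldCollation(expr, flags);
      }

      public static void main(String[] args) {
        System.out.println(orderKey("value", false).descending()); // false
        System.out.println(orderKey("value", true).descending());  // true
      }
    }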
+ RexWindowBound getBound(BoundarySpec bs, RexNodeConverter converter) {
+ RexWindowBound rwb = null;
+
+ if (bs != null) {
+ SqlNode sn = null;
+ SqlParserPos pos = new SqlParserPos(1, 1);
+ SqlNode amt = bs.getAmt() == 0 ? null : SqlLiteral.createExactNumeric(
+ String.valueOf(bs.getAmt()), new SqlParserPos(2, 2));
+ RexNode amtLiteral = null;
+ SqlCall sc = null;
+ RexNode rn = null;
+
+ if (amt != null)
+ amtLiteral = m_cluster.getRexBuilder().makeLiteral(new Integer(bs.getAmt()),
+ m_cluster.getTypeFactory().createSqlType(SqlTypeName.INTEGER), true);
+
+ switch (bs.getDirection()) {
+ case PRECEDING:
+ if (amt == null) {
+ rwb = RexWindowBound.create(SqlWindow.createUnboundedPreceding(pos), null);
+ } else {
+ sc = (SqlCall) SqlWindow.createPreceding(amt, pos);
+ rwb = RexWindowBound.create(sc, m_cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral));
+ }
+ break;
+
+ case CURRENT:
+ rwb = RexWindowBound.create(SqlWindow.createCurrentRow(new SqlParserPos(1, 1)), null);
+ break;
+
+ case FOLLOWING:
+ if (amt == null) {
+ rwb = RexWindowBound.create(SqlWindow.createUnboundedFollowing(new SqlParserPos(1, 1)), null);
+ } else {
+ sc = (SqlCall) SqlWindow.createFollowing(amt, pos);
+ rwb = RexWindowBound.create(sc, m_cluster.getRexBuilder().makeCall(sc.getOperator(), amtLiteral));
+ }
+ break;
+ }
+ }
+
+ return rwb;
+ }
+
+
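getBound covers the frame-bound cases Hive can produce. Two details are easy to miss: an amount of 0 is what BoundarySpec uses for UNBOUNDED (hence the bs.getAmt() == 0 test), and a bounded PRECEDING/FOLLOWING needs both a SqlNode and a RexNode offset literal for RexWindowBound.create. A stand-alone sketch of just the direction/amount matrix (hypothetical names, not the Optiq API):

    public class WindowBoundSketch {
      enum Direction { PRECEDING, CURRENT, FOLLOWING }

      // Mirrors getBound's case analysis; amt == 0 plays the role of
      // "unbounded", matching the BoundarySpec convention above.
      static String describe(Direction d, int amt) {
        switch (d) {
          case PRECEDING:
            return amt == 0 ? "UNBOUNDED PRECEDING" : amt + " PRECEDING";
          case CURRENT:
            return "CURRENT ROW";
          case FOLLOWING:
            return amt == 0 ? "UNBOUNDED FOLLOWING" : amt + " FOLLOWING";
          default:
            throw new AssertionError(d);
        }
      }

      public static void main(String[] args) {
        System.out.println(describe(Direction.PRECEDING, 0)); // UNBOUNDED PRECEDING
        System.out.println(describe(Direction.FOLLOWING, 1)); // 1 FOLLOWING
        System.out.println(describe(Direction.CURRENT, 0));   // CURRENT ROW
      }
    }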
+ Pair<RexNode, TypeInfo> genWindowingProj(QB qb, ASTNode windowProjAst, int wndSpecASTIndx, int wndProjPos,
+ RelNode srcRel) throws SemanticException {
+ RexNode w = null;
+ TypeInfo wHiveRetType = null;
+ QBParseInfo qbp = getQBParseInfo(qb);
+ WindowingSpec wSpec = qb.getAllWindowingSpecs().values().iterator().next();
+
+ if (wSpec != null) {
+ // 1. Get valid Window Function Spec
+ // NOTE: WindowSpec uses alias "_wcol0","_wcol1"... for
+ // WindowFunctionSpec
+ wSpec.validateAndMakeEffective();
+ WindowExpressionSpec wExpSpec = wSpec.aliasToWdwExpr.get("_wcol" + wndProjPos);
+ //TODO: Throw exception if wExpSpec is not of type WindowFunctionSpec
+ if (wExpSpec instanceof WindowFunctionSpec) {
+
+ // 2. Get Hive Aggregate Info
+ AggInfo hiveAggInfo = getHiveAggInfo(windowProjAst, wndSpecASTIndx - 1,
+ this.m_relToHiveRR.get(srcRel));
+
+ // 3. Get Optiq Return type for Agg Fn
+ wHiveRetType = hiveAggInfo.m_returnType;
+ RelDataType optiqAggFnRetType = TypeConverter.convert(hiveAggInfo.m_returnType,
+ this.m_cluster.getTypeFactory());
+
+ // 4. Convert Agg Fn args to Optiq
+ ImmutableMap<String, Integer> posMap = this.m_relToHiveColNameOptiqPosMap.get(srcRel);
+ RexNodeConverter converter = new RexNodeConverter(this.m_cluster, srcRel.getRowType(),
+ posMap, 0, false);
+ Builder<RexNode> optiqAggFnArgsBldr = ImmutableList.<RexNode> builder();
+ Builder<RelDataType> optiqAggFnArgsTypeBldr = ImmutableList.<RelDataType> builder();
+ RexNode rexNd = null;
+ for (int i = 0; i < hiveAggInfo.m_aggParams.size(); i++) {
+ optiqAggFnArgsBldr.add(converter.convert(hiveAggInfo.m_aggParams.get(i)));
+ optiqAggFnArgsTypeBldr.add(TypeConverter.convert(hiveAggInfo.m_aggParams.get(i)
+ .getTypeInfo(), this.m_cluster.getTypeFactory()));
+ }
+ ImmutableList<RexNode> optiqAggFnArgs = optiqAggFnArgsBldr.build();
+ ImmutableList<RelDataType> optiqAggFnArgsType = optiqAggFnArgsTypeBldr.build();
+
+ // 5. Get Optiq Agg Fn
+ final SqlAggFunction optiqAggFn = SqlFunctionConverter.getOptiqAggFn(hiveAggInfo.m_udfName,
+ optiqAggFnArgsType, optiqAggFnRetType);
+
+ // 6. Translate Window spec
+ RowResolver inputRR = m_relToHiveRR.get(srcRel);
+ WindowSpec wndSpec = ((WindowFunctionSpec) wExpSpec).getWindowSpec();
+ List<RexNode> partitionKeys = getPartitionKeys(wndSpec.getPartition(), converter, inputRR);
+ List<RexFieldCollation> orderKeys = getOrderKeys(wndSpec.getOrder(), converter, inputRR);
+ RexWindowBound upperBound = getBound(wndSpec.windowFrame.start, converter);
+ RexWindowBound lowerBound = getBound(wndSpec.windowFrame.end, converter);
+ boolean isRows = ((wndSpec.windowFrame.start instanceof RangeBoundarySpec) || (wndSpec.windowFrame.end instanceof RangeBoundarySpec)) ? true
+ : false;
+
+ w = m_cluster.getRexBuilder().makeOver(optiqAggFnRetType, optiqAggFn, optiqAggFnArgs,
+ partitionKeys, ImmutableList.<RexFieldCollation> copyOf(orderKeys), lowerBound,
+ upperBound, isRows, true, false);
+ } else {
+ //TODO: Convert to Semantic Exception
+ throw new RuntimeException("Unsupported window Spec");
+ }
+ }
+
+ return new Pair(w, wHiveRetType);
+ }
+
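genWindowingProj is the end-to-end path: resolve the internal alias _wcol<N> to its WindowFunctionSpec, derive the aggregate's Hive and Optiq return types, convert the argument expressions, look up the Optiq aggregate function, translate the partition/order/frame pieces, and call makeOver. Note that the Hive frame start travels through the upperBound variable and the frame end through lowerBound; ASTConverter.getWindowRangeAST above reads getUpperBound()/getLowerBound() back in the same convention, so the round trip is self-consistent. For sum(c_float) OVER (PARTITION BY key ORDER BY value ROWS BETWEEN 1 PRECEDING AND CURRENT ROW) from the new tests, the makeOver inputs would be roughly (a hand-worked decomposition, not captured output; Hive widens SUM over float to double):

    aggregate      SUM, return type DOUBLE
    args           [ $c_float ]
    partitionKeys  [ $key ]
    orderKeys      [ ($value, {}) ]   -- empty flag set = ascending
    frame start    1 PRECEDING        (RexWindowBound via getBound)
    frame end      CURRENT ROW
    isRows         true               (RangeBoundarySpec = physical/ROWS)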
+ int getWindowSpecIndx(ASTNode wndAST) {
+ int wndASTIndx = -1;
+ int wi = wndAST.getChildCount() - 1;
+ if (wi <= 0 || (wndAST.getChild(wi).getType() != HiveParser.TOK_WINDOWSPEC)) {
+ wi = -1;
+ }
+
+ return wi;
+ }
+
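getWindowSpecIndx encodes the detection rule used below in 6.2: a select expression is windowed when its function AST carries a trailing TOK_WINDOWSPEC child (the wi <= 0 guard also rules out a bare function-name node). A stand-alone sketch with a hypothetical Ast record in place of ASTNode:

    public class WindowDetectSketch {
      // Stand-in for Hive's ASTNode: a token type plus children.
      record Ast(String type, Ast... children) {}

      // Mirrors getWindowSpecIndx: index of the trailing TOK_WINDOWSPEC
      // child of a TOK_FUNCTION node, or -1 if there is none.
      static int windowSpecIndex(Ast fn) {
        int wi = fn.children().length - 1;
        if (wi <= 0 || !fn.children()[wi].type().equals("TOK_WINDOWSPEC")) {
          wi = -1;
        }
        return wi;
      }

      public static void main(String[] args) {
        Ast plain = new Ast("TOK_FUNCTION",
            new Ast("count"), new Ast("TOK_TABLE_OR_COL"));
        Ast windowed = new Ast("TOK_FUNCTION",
            new Ast("count"), new Ast("TOK_TABLE_OR_COL"),
            new Ast("TOK_WINDOWSPEC"));
        System.out.println(windowSpecIndex(plain));    // -1
        System.out.println(windowSpecIndex(windowed)); // 2
      }
    }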
      /**
       * NOTE: there can only be one select clause since we don't handle multi
       * destination insert.
@@ -12638,6 +12804,7 @@ public class SemanticAnalyzer extends Ba
          throws SemanticException {
        boolean subQuery;
        ArrayList<ExprNodeDesc> col_list = new ArrayList<ExprNodeDesc>();
+ ArrayList<Pair<Integer, RexNode>> windowingRexNodes = new ArrayList<Pair<Integer, RexNode>>();

        // 1. Get Select Expression List
        QBParseInfo qbp = getQBParseInfo(qb);
@@ -12688,6 +12855,7 @@ public class SemanticAnalyzer extends Ba
        // 6. Iterate over all expression (after SELECT)
        ASTNode exprList = selExprList;
        int startPosn = posn;
+ int wndProjPos = 0;
        List<String> tabAliasesForAllProjs = getTabAliases(inputRR);
        for (int i = startPosn; i < exprList.getChildCount(); ++i) {

@@ -12695,11 +12863,38 @@ public class SemanticAnalyzer extends Ba
          ASTNode child = (ASTNode) exprList.getChild(i);
          boolean hasAsClause = (!isInTransform) && (child.getChildCount() == 2);

- // 6.2 bail out if it is windowing spec
- boolean isWindowSpec = child.getChildCount() == 3 ? (child.getChild(2)
- .getType() == HiveParser.TOK_WINDOWSPEC) : false;
- if (isWindowSpec)
- throw new RuntimeException("Windowing is not supported yet");
+
+ // 6.2 Handle windowing spec
+ int wndSpecASTIndx = -1;
+ //TODO: is the check ((child.getChildCount() == 1) || hasAsClause) needed?
+ boolean isWindowSpec = (((child.getChildCount() == 1) || hasAsClause) && child.getChild(0)
+ .getType() == HiveParser.TOK_FUNCTION) ? ((wndSpecASTIndx = getWindowSpecIndx((ASTNode) child
+ .getChild(0))) > 0) : false;
+ if (isWindowSpec) {
+ Pair<RexNode, TypeInfo> wtp = genWindowingProj(qb, (ASTNode) child.getChild(0), wndSpecASTIndx,
+ wndProjPos, srcRel);
+ windowingRexNodes.add(new Pair(pos, wtp.getFirst()));
+
+ // 6.2.1 Check if window expr has alias
+ String colAlias = null;
+ ASTNode tabOrColAst = (ASTNode) child.getChild(1);
+ if (tabOrColAst != null)
+ colAlias = BaseSemanticAnalyzer.getUnescapedName(tabOrColAst);
+
+ // 6.2.2 Update Output Row Schema
+ ColumnInfo oColInfo = new ColumnInfo(getColumnInternalName(pos), wtp.getSecond(), null,
+ false);
+ if (colAlias != null) {
+ out_rwsch.checkColumn(null, colAlias);
+ out_rwsch.put(null, colAlias, oColInfo);
+ } else {
+ out_rwsch.putExpression(child, oColInfo);
+ }
+
+ pos = Integer.valueOf(pos.intValue() + 1);
+ wndProjPos++;
+ continue;
+ }

          // 6.3 EXPR AS (ALIAS,...) parses, but is only allowed for UDTF's
          // This check is not needed and invalid when there is a transform b/c
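Section 6.2 thus replaces the old hard failure with detection and translation. For select count(c_int) over() as x, sum(c_float) over() from t1 (one of the new tests), the loop translates the two expressions with wndProjPos 0 and 1, which is also how they are matched against the aliases _wcol0 and _wcol1 that validateAndMakeEffective assigns; x enters the output schema through the explicit-alias branch, the unaliased sum through putExpression. The RexNodes themselves are only queued in windowingRexNodes here; they rejoin the projection in step 9 below.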
@@ -12805,9 +13000,15 @@ public class SemanticAnalyzer extends Ba
        for (ExprNodeDesc colExpr : col_list) {
          optiqColLst.add(rexNodeConv.convert(colExpr));
        }
+
+ // 9. Add windowing Proj Names
+ for (Pair<Integer, RexNode> wndPair : windowingRexNodes) {
+ optiqColLst.add(wndPair.getFirst(), wndPair.getSecond());
+ columnNames.add(getColumnInternalName(wndPair.getFirst()));
+ }

- // 9. Construct Hive Project Rel
- // 9.1. Prepend column names with '_o_'
+ // 10. Construct Hive Project Rel
+ // 10.1. Prepend column names with '_o_'
        /*
         * Hive treats names that start with '_c' as internalNames; so change the
         * names so we don't run into this issue when converting back to Hive AST.
@@ -12818,11 +13019,11 @@ public class SemanticAnalyzer extends Ba
                return "_o_" + hName;
              }
            });
- // 9.2 Build Optiq Rel Node for project using converted projections & col
+ // 10.2 Build Optiq Rel Node for project using converted projections & col
        // names
        HiveRel selRel = HiveProjectRel.create(srcRel, optiqColLst, oFieldNames);

- // 10. Keep track of colname-to-posmap && RR for new select
+ // 11. Keep track of colname-to-posmap && RR for new select
        this.m_relToHiveColNameOptiqPosMap.put(selRel,
            buildHiveToOptiqColumnMap(out_rwsch, selRel));
        this.m_relToHiveRR.put(selRel, out_rwsch);
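Step 9 is where the queued window expressions rejoin the projection: each (position, RexNode) pair recorded in 6.2 is inserted back into optiqColLst at its original select-list position, so windowed and ordinary columns come out in source order. A stand-alone sketch of that re-insertion (hypothetical names; strings stand in for RexNodes), assuming positions were recorded in ascending order as the loop in 6.2 guarantees:

    import java.util.ArrayList;
    import java.util.List;

    public class WindowReinsertSketch {
      record Queued(int pos, String rexNode) {}

      static List<String> merge(List<String> ordinaryCols, List<Queued> windowed) {
        List<String> out = new ArrayList<>(ordinaryCols);
        // List.add(index, elem) shifts the tail right, so ascending
        // positions land exactly where the expressions appeared in SELECT.
        for (Queued q : windowed) out.add(q.pos(), q.rexNode());
        return out;
      }

      public static void main(String[] args) {
        // select count(c_int) over() as x, key, sum(c_float) over() from t1:
        // ordinary columns were collected without the window expressions.
        List<String> ordinary = List.of("$key");
        List<Queued> win = List.of(new Queued(0, "COUNT() OVER ()"),
                                   new Queued(2, "SUM($c_float) OVER ()"));
        System.out.println(merge(ordinary, win));
        // [COUNT() OVER (), $key, SUM($c_float) OVER ()]
      }
    }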

Modified: hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q?rev=1614525&r1=1614524&r2=1614525&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q (original)
+++ hive/branches/cbo/ql/src/test/queries/clientpositive/cbo_correctness.q Wed Jul 30 00:09:09 2014
@@ -172,3 +172,13 @@ select count(distinct c_int) as a, avg(c
  select count(distinct c_int) as a, avg(c_float) from t1 group by c_int;
  select count(distinct c_int) as a, avg(c_float) from t1 group by c_float, c_int;

+-- 9. Test Windowing Functions
+select count(c_int) over() from t1;
+select * from (select count(c_int) over() from t1) t1;
+select count(c_int) over(), sum(c_float) over() from t1;
+select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1;
+select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1;
+select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1;
+
+
+

Modified: hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out
URL: http://svn.apache.org/viewvc/hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out?rev=1614525&r1=1614524&r2=1614525&view=diff
==============================================================================
--- hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out (original)
+++ hive/branches/cbo/ql/src/test/results/clientpositive/cbo_correctness.q.out Wed Jul 30 00:09:09 2014
@@ -15776,3 +15776,173 @@ POSTHOOK: Input: default@t1
  #### A masked pattern was here ####
  0 NULL
  1 1.0
+PREHOOK: query: -- 9. Test Windowing Functions
+select count(c_int) over() from t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: -- 9. Test Windowing Functions
+select count(c_int) over() from t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select * from (select count(c_int) over() from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select count(c_int) over() from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select count(c_int) over(), sum(c_float) over() from t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select count(c_int) over(), sum(c_float) over() from t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+18 18.0
+PREHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select x from (select count(c_int) over() as x, sum(c_float) over() from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+18
+PREHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select * from (select max(c_int) over (partition by key order by value Rows UNBOUNDED PRECEDING), min(c_int) over (partition by key order by value rows current row), count(c_int) over(partition by key order by value ROWS 1 PRECEDING), avg(value) over (partition by key order by value Rows between unbounded preceding and unbounded following), sum(value) over (partition by key order by value rows between unbounded preceding and current row), avg(c_float) over (partition by key order by value Rows between 1 preceding and unbounded following), sum(c_float) over (partition by key order by value rows between 1 preceding and current row), max(c_float) over (partition by key order by value rows between 1 preceding and unbounded following), min(c_float) over (partition by key order by value rows between 1 preceding and 1 following) from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 3.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 4.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 5.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 6.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 7.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 8.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 9.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 10.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 11.0 1.0 2.0 1.0 1.0
+1 1 2 1.0 12.0 1.0 2.0 1.0 1.0
+1 1 1 1.0 1.0 1.0 1.0 1.0 1.0
+1 1 2 1.0 2.0 1.0 2.0 1.0 1.0
+NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
+NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
+PREHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@t1
+#### A masked pattern was here ####
+POSTHOOK: query: select i, a, h, b, c, d, e, f, g, a as x, a +1 as y from (select max(c_int) over (partition by key order by value range UNBOUNDED PRECEDING) a, min(c_int) over (partition by key order by value range current row) b, count(c_int) over(partition by key order by value range 1 PRECEDING) c, avg(value) over (partition by key order by value range between unbounded preceding and unbounded following) d, sum(value) over (partition by key order by value range between unbounded preceding and current row) e, avg(c_float) over (partition by key order by value range between 1 preceding and unbounded following) f, sum(c_float) over (partition by key order by value range between 1 preceding and current row) g, max(c_float) over (partition by key order by value range between 1 preceding and unbounded following) h, min(c_float) over (partition by key order by value range between 1 preceding and 1 following) i from t1) t1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@t1
+#### A masked pattern was here ####
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 12 1.0 12.0 1.0 12.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+1.0 1 1.0 1 2 1.0 2.0 1.0 2.0 1 2
+NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
+NULL NULL NULL NULL 0 NULL 0.0 NULL NULL NULL NULL
